rucio-32.8.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rucio/__init__.py +18 -0
- rucio/alembicrevision.py +16 -0
- rucio/api/__init__.py +14 -0
- rucio/api/account.py +266 -0
- rucio/api/account_limit.py +287 -0
- rucio/api/authentication.py +302 -0
- rucio/api/config.py +218 -0
- rucio/api/credential.py +60 -0
- rucio/api/did.py +726 -0
- rucio/api/dirac.py +71 -0
- rucio/api/exporter.py +60 -0
- rucio/api/heartbeat.py +62 -0
- rucio/api/identity.py +160 -0
- rucio/api/importer.py +46 -0
- rucio/api/lifetime_exception.py +95 -0
- rucio/api/lock.py +131 -0
- rucio/api/meta.py +85 -0
- rucio/api/permission.py +72 -0
- rucio/api/quarantined_replica.py +69 -0
- rucio/api/replica.py +528 -0
- rucio/api/request.py +220 -0
- rucio/api/rse.py +601 -0
- rucio/api/rule.py +335 -0
- rucio/api/scope.py +89 -0
- rucio/api/subscription.py +255 -0
- rucio/api/temporary_did.py +49 -0
- rucio/api/vo.py +112 -0
- rucio/client/__init__.py +16 -0
- rucio/client/accountclient.py +413 -0
- rucio/client/accountlimitclient.py +155 -0
- rucio/client/baseclient.py +929 -0
- rucio/client/client.py +77 -0
- rucio/client/configclient.py +113 -0
- rucio/client/credentialclient.py +54 -0
- rucio/client/didclient.py +691 -0
- rucio/client/diracclient.py +48 -0
- rucio/client/downloadclient.py +1674 -0
- rucio/client/exportclient.py +44 -0
- rucio/client/fileclient.py +51 -0
- rucio/client/importclient.py +42 -0
- rucio/client/lifetimeclient.py +74 -0
- rucio/client/lockclient.py +99 -0
- rucio/client/metaclient.py +137 -0
- rucio/client/pingclient.py +45 -0
- rucio/client/replicaclient.py +444 -0
- rucio/client/requestclient.py +109 -0
- rucio/client/rseclient.py +664 -0
- rucio/client/ruleclient.py +287 -0
- rucio/client/scopeclient.py +88 -0
- rucio/client/subscriptionclient.py +161 -0
- rucio/client/touchclient.py +78 -0
- rucio/client/uploadclient.py +871 -0
- rucio/common/__init__.py +14 -0
- rucio/common/cache.py +74 -0
- rucio/common/config.py +796 -0
- rucio/common/constants.py +92 -0
- rucio/common/constraints.py +18 -0
- rucio/common/didtype.py +187 -0
- rucio/common/dumper/__init__.py +306 -0
- rucio/common/dumper/consistency.py +449 -0
- rucio/common/dumper/data_models.py +325 -0
- rucio/common/dumper/path_parsing.py +65 -0
- rucio/common/exception.py +1092 -0
- rucio/common/extra.py +37 -0
- rucio/common/logging.py +404 -0
- rucio/common/pcache.py +1387 -0
- rucio/common/policy.py +84 -0
- rucio/common/schema/__init__.py +143 -0
- rucio/common/schema/atlas.py +411 -0
- rucio/common/schema/belleii.py +406 -0
- rucio/common/schema/cms.py +478 -0
- rucio/common/schema/domatpc.py +399 -0
- rucio/common/schema/escape.py +424 -0
- rucio/common/schema/generic.py +431 -0
- rucio/common/schema/generic_multi_vo.py +410 -0
- rucio/common/schema/icecube.py +404 -0
- rucio/common/schema/lsst.py +423 -0
- rucio/common/stomp_utils.py +160 -0
- rucio/common/stopwatch.py +56 -0
- rucio/common/test_rucio_server.py +148 -0
- rucio/common/types.py +158 -0
- rucio/common/utils.py +1946 -0
- rucio/core/__init__.py +14 -0
- rucio/core/account.py +426 -0
- rucio/core/account_counter.py +171 -0
- rucio/core/account_limit.py +357 -0
- rucio/core/authentication.py +563 -0
- rucio/core/config.py +386 -0
- rucio/core/credential.py +218 -0
- rucio/core/did.py +3102 -0
- rucio/core/did_meta_plugins/__init__.py +250 -0
- rucio/core/did_meta_plugins/did_column_meta.py +326 -0
- rucio/core/did_meta_plugins/did_meta_plugin_interface.py +116 -0
- rucio/core/did_meta_plugins/filter_engine.py +573 -0
- rucio/core/did_meta_plugins/json_meta.py +215 -0
- rucio/core/did_meta_plugins/mongo_meta.py +199 -0
- rucio/core/did_meta_plugins/postgres_meta.py +317 -0
- rucio/core/dirac.py +208 -0
- rucio/core/distance.py +164 -0
- rucio/core/exporter.py +59 -0
- rucio/core/heartbeat.py +263 -0
- rucio/core/identity.py +290 -0
- rucio/core/importer.py +248 -0
- rucio/core/lifetime_exception.py +377 -0
- rucio/core/lock.py +474 -0
- rucio/core/message.py +241 -0
- rucio/core/meta.py +190 -0
- rucio/core/monitor.py +441 -0
- rucio/core/naming_convention.py +154 -0
- rucio/core/nongrid_trace.py +124 -0
- rucio/core/oidc.py +1339 -0
- rucio/core/permission/__init__.py +107 -0
- rucio/core/permission/atlas.py +1333 -0
- rucio/core/permission/belleii.py +1076 -0
- rucio/core/permission/cms.py +1166 -0
- rucio/core/permission/escape.py +1076 -0
- rucio/core/permission/generic.py +1128 -0
- rucio/core/permission/generic_multi_vo.py +1148 -0
- rucio/core/quarantined_replica.py +190 -0
- rucio/core/replica.py +3627 -0
- rucio/core/replica_sorter.py +368 -0
- rucio/core/request.py +2241 -0
- rucio/core/rse.py +1835 -0
- rucio/core/rse_counter.py +155 -0
- rucio/core/rse_expression_parser.py +460 -0
- rucio/core/rse_selector.py +277 -0
- rucio/core/rule.py +3419 -0
- rucio/core/rule_grouping.py +1473 -0
- rucio/core/scope.py +152 -0
- rucio/core/subscription.py +316 -0
- rucio/core/temporary_did.py +188 -0
- rucio/core/topology.py +448 -0
- rucio/core/trace.py +361 -0
- rucio/core/transfer.py +1233 -0
- rucio/core/vo.py +151 -0
- rucio/core/volatile_replica.py +123 -0
- rucio/daemons/__init__.py +14 -0
- rucio/daemons/abacus/__init__.py +14 -0
- rucio/daemons/abacus/account.py +106 -0
- rucio/daemons/abacus/collection_replica.py +113 -0
- rucio/daemons/abacus/rse.py +107 -0
- rucio/daemons/atropos/__init__.py +14 -0
- rucio/daemons/atropos/atropos.py +243 -0
- rucio/daemons/auditor/__init__.py +261 -0
- rucio/daemons/auditor/hdfs.py +86 -0
- rucio/daemons/auditor/srmdumps.py +284 -0
- rucio/daemons/automatix/__init__.py +14 -0
- rucio/daemons/automatix/automatix.py +281 -0
- rucio/daemons/badreplicas/__init__.py +14 -0
- rucio/daemons/badreplicas/minos.py +311 -0
- rucio/daemons/badreplicas/minos_temporary_expiration.py +173 -0
- rucio/daemons/badreplicas/necromancer.py +200 -0
- rucio/daemons/bb8/__init__.py +14 -0
- rucio/daemons/bb8/bb8.py +356 -0
- rucio/daemons/bb8/common.py +762 -0
- rucio/daemons/bb8/nuclei_background_rebalance.py +147 -0
- rucio/daemons/bb8/t2_background_rebalance.py +146 -0
- rucio/daemons/c3po/__init__.py +14 -0
- rucio/daemons/c3po/algorithms/__init__.py +14 -0
- rucio/daemons/c3po/algorithms/simple.py +131 -0
- rucio/daemons/c3po/algorithms/t2_free_space.py +125 -0
- rucio/daemons/c3po/algorithms/t2_free_space_only_pop.py +127 -0
- rucio/daemons/c3po/algorithms/t2_free_space_only_pop_with_network.py +279 -0
- rucio/daemons/c3po/c3po.py +342 -0
- rucio/daemons/c3po/collectors/__init__.py +14 -0
- rucio/daemons/c3po/collectors/agis.py +108 -0
- rucio/daemons/c3po/collectors/free_space.py +62 -0
- rucio/daemons/c3po/collectors/jedi_did.py +48 -0
- rucio/daemons/c3po/collectors/mock_did.py +46 -0
- rucio/daemons/c3po/collectors/network_metrics.py +63 -0
- rucio/daemons/c3po/collectors/workload.py +110 -0
- rucio/daemons/c3po/utils/__init__.py +14 -0
- rucio/daemons/c3po/utils/dataset_cache.py +40 -0
- rucio/daemons/c3po/utils/expiring_dataset_cache.py +45 -0
- rucio/daemons/c3po/utils/expiring_list.py +63 -0
- rucio/daemons/c3po/utils/popularity.py +82 -0
- rucio/daemons/c3po/utils/timeseries.py +76 -0
- rucio/daemons/cache/__init__.py +14 -0
- rucio/daemons/cache/consumer.py +191 -0
- rucio/daemons/common.py +391 -0
- rucio/daemons/conveyor/__init__.py +14 -0
- rucio/daemons/conveyor/common.py +530 -0
- rucio/daemons/conveyor/finisher.py +492 -0
- rucio/daemons/conveyor/poller.py +372 -0
- rucio/daemons/conveyor/preparer.py +198 -0
- rucio/daemons/conveyor/receiver.py +206 -0
- rucio/daemons/conveyor/stager.py +127 -0
- rucio/daemons/conveyor/submitter.py +379 -0
- rucio/daemons/conveyor/throttler.py +468 -0
- rucio/daemons/follower/__init__.py +14 -0
- rucio/daemons/follower/follower.py +97 -0
- rucio/daemons/hermes/__init__.py +14 -0
- rucio/daemons/hermes/hermes.py +738 -0
- rucio/daemons/judge/__init__.py +14 -0
- rucio/daemons/judge/cleaner.py +149 -0
- rucio/daemons/judge/evaluator.py +172 -0
- rucio/daemons/judge/injector.py +154 -0
- rucio/daemons/judge/repairer.py +144 -0
- rucio/daemons/oauthmanager/__init__.py +14 -0
- rucio/daemons/oauthmanager/oauthmanager.py +199 -0
- rucio/daemons/reaper/__init__.py +14 -0
- rucio/daemons/reaper/dark_reaper.py +272 -0
- rucio/daemons/reaper/light_reaper.py +255 -0
- rucio/daemons/reaper/reaper.py +701 -0
- rucio/daemons/replicarecoverer/__init__.py +14 -0
- rucio/daemons/replicarecoverer/suspicious_replica_recoverer.py +487 -0
- rucio/daemons/storage/__init__.py +14 -0
- rucio/daemons/storage/consistency/__init__.py +14 -0
- rucio/daemons/storage/consistency/actions.py +753 -0
- rucio/daemons/tracer/__init__.py +14 -0
- rucio/daemons/tracer/kronos.py +513 -0
- rucio/daemons/transmogrifier/__init__.py +14 -0
- rucio/daemons/transmogrifier/transmogrifier.py +753 -0
- rucio/daemons/undertaker/__init__.py +14 -0
- rucio/daemons/undertaker/undertaker.py +137 -0
- rucio/db/__init__.py +14 -0
- rucio/db/sqla/__init__.py +38 -0
- rucio/db/sqla/constants.py +192 -0
- rucio/db/sqla/migrate_repo/__init__.py +14 -0
- rucio/db/sqla/migrate_repo/env.py +111 -0
- rucio/db/sqla/migrate_repo/versions/01eaf73ab656_add_new_rule_notification_state_progress.py +71 -0
- rucio/db/sqla/migrate_repo/versions/0437a40dbfd1_add_eol_at_in_rules.py +50 -0
- rucio/db/sqla/migrate_repo/versions/0f1adb7a599a_create_transfer_hops_table.py +61 -0
- rucio/db/sqla/migrate_repo/versions/102efcf145f4_added_stuck_at_column_to_rules.py +46 -0
- rucio/db/sqla/migrate_repo/versions/13d4f70c66a9_introduce_transfer_limits.py +93 -0
- rucio/db/sqla/migrate_repo/versions/140fef722e91_cleanup_distances_table.py +78 -0
- rucio/db/sqla/migrate_repo/versions/14ec5aeb64cf_add_request_external_host.py +46 -0
- rucio/db/sqla/migrate_repo/versions/156fb5b5a14_add_request_type_to_requests_idx.py +53 -0
- rucio/db/sqla/migrate_repo/versions/1677d4d803c8_split_rse_availability_into_multiple.py +69 -0
- rucio/db/sqla/migrate_repo/versions/16a0aca82e12_create_index_on_table_replicas_path.py +42 -0
- rucio/db/sqla/migrate_repo/versions/1803333ac20f_adding_provenance_and_phys_group.py +46 -0
- rucio/db/sqla/migrate_repo/versions/1a29d6a9504c_add_didtype_chck_to_requests.py +61 -0
- rucio/db/sqla/migrate_repo/versions/1a80adff031a_create_index_on_rules_hist_recent.py +42 -0
- rucio/db/sqla/migrate_repo/versions/1c45d9730ca6_increase_identity_length.py +141 -0
- rucio/db/sqla/migrate_repo/versions/1d1215494e95_add_quarantined_replicas_table.py +75 -0
- rucio/db/sqla/migrate_repo/versions/1d96f484df21_asynchronous_rules_and_rule_approval.py +75 -0
- rucio/db/sqla/migrate_repo/versions/1f46c5f240ac_add_bytes_column_to_bad_replicas.py +46 -0
- rucio/db/sqla/migrate_repo/versions/1fc15ab60d43_add_message_history_table.py +51 -0
- rucio/db/sqla/migrate_repo/versions/2190e703eb6e_move_rse_settings_to_rse_attributes.py +135 -0
- rucio/db/sqla/migrate_repo/versions/21d6b9dc9961_add_mismatch_scheme_state_to_requests.py +65 -0
- rucio/db/sqla/migrate_repo/versions/22cf51430c78_add_availability_column_to_table_rses.py +42 -0
- rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py +66 -0
- rucio/db/sqla/migrate_repo/versions/25821a8a45a3_remove_unique_constraint_on_requests.py +54 -0
- rucio/db/sqla/migrate_repo/versions/25fc855625cf_added_unique_constraint_to_rules.py +43 -0
- rucio/db/sqla/migrate_repo/versions/269fee20dee9_add_repair_cnt_to_locks.py +46 -0
- rucio/db/sqla/migrate_repo/versions/271a46ea6244_add_ignore_availability_column_to_rules.py +47 -0
- rucio/db/sqla/migrate_repo/versions/277b5fbb41d3_switch_heartbeats_executable.py +54 -0
- rucio/db/sqla/migrate_repo/versions/27e3a68927fb_remove_replicas_tombstone_and_replicas_.py +39 -0
- rucio/db/sqla/migrate_repo/versions/2854cd9e168_added_rule_id_column.py +48 -0
- rucio/db/sqla/migrate_repo/versions/295289b5a800_processed_by_and__at_in_requests.py +47 -0
- rucio/db/sqla/migrate_repo/versions/2962ece31cf4_add_nbaccesses_column_in_the_did_table.py +48 -0
- rucio/db/sqla/migrate_repo/versions/2af3291ec4c_added_replicas_history_table.py +59 -0
- rucio/db/sqla/migrate_repo/versions/2b69addda658_add_columns_for_third_party_copy_read_.py +47 -0
- rucio/db/sqla/migrate_repo/versions/2b8e7bcb4783_add_config_table.py +72 -0
- rucio/db/sqla/migrate_repo/versions/2ba5229cb54c_add_submitted_at_to_requests_table.py +46 -0
- rucio/db/sqla/migrate_repo/versions/2cbee484dcf9_added_column_volume_to_rse_transfer_.py +45 -0
- rucio/db/sqla/migrate_repo/versions/2edee4a83846_add_source_to_requests_and_requests_.py +48 -0
- rucio/db/sqla/migrate_repo/versions/2eef46be23d4_change_tokens_pk.py +48 -0
- rucio/db/sqla/migrate_repo/versions/2f648fc909f3_index_in_rule_history_on_scope_name.py +42 -0
- rucio/db/sqla/migrate_repo/versions/3082b8cef557_add_naming_convention_table_and_closed_.py +69 -0
- rucio/db/sqla/migrate_repo/versions/30fa38b6434e_add_index_on_service_column_in_the_message_table.py +46 -0
- rucio/db/sqla/migrate_repo/versions/3152492b110b_added_staging_area_column.py +78 -0
- rucio/db/sqla/migrate_repo/versions/32c7d2783f7e_create_bad_replicas_table.py +62 -0
- rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py +74 -0
- rucio/db/sqla/migrate_repo/versions/35ef10d1e11b_change_index_on_table_requests.py +44 -0
- rucio/db/sqla/migrate_repo/versions/379a19b5332d_create_rse_limits_table.py +67 -0
- rucio/db/sqla/migrate_repo/versions/384b96aa0f60_created_rule_history_tables.py +134 -0
- rucio/db/sqla/migrate_repo/versions/3ac1660a1a72_extend_distance_table.py +58 -0
- rucio/db/sqla/migrate_repo/versions/3ad36e2268b0_create_collection_replicas_updates_table.py +79 -0
- rucio/db/sqla/migrate_repo/versions/3c9df354071b_extend_waiting_request_state.py +61 -0
- rucio/db/sqla/migrate_repo/versions/3d9813fab443_add_a_new_state_lost_in_badfilesstatus.py +45 -0
- rucio/db/sqla/migrate_repo/versions/40ad39ce3160_add_transferred_at_to_requests_table.py +46 -0
- rucio/db/sqla/migrate_repo/versions/4207be2fd914_add_notification_column_to_rules.py +65 -0
- rucio/db/sqla/migrate_repo/versions/42db2617c364_create_index_on_requests_external_id.py +42 -0
- rucio/db/sqla/migrate_repo/versions/436827b13f82_added_column_activity_to_table_requests.py +46 -0
- rucio/db/sqla/migrate_repo/versions/44278720f774_update_requests_typ_sta_upd_idx_index.py +46 -0
- rucio/db/sqla/migrate_repo/versions/45378a1e76a8_create_collection_replica_table.py +80 -0
- rucio/db/sqla/migrate_repo/versions/469d262be19_removing_created_at_index.py +43 -0
- rucio/db/sqla/migrate_repo/versions/4783c1f49cb4_create_distance_table.py +61 -0
- rucio/db/sqla/migrate_repo/versions/49a21b4d4357_create_index_on_table_tokens.py +47 -0
- rucio/db/sqla/migrate_repo/versions/4a2cbedda8b9_add_source_replica_expression_column_to_.py +46 -0
- rucio/db/sqla/migrate_repo/versions/4a7182d9578b_added_bytes_length_accessed_at_columns.py +52 -0
- rucio/db/sqla/migrate_repo/versions/4bab9edd01fc_create_index_on_requests_rule_id.py +42 -0
- rucio/db/sqla/migrate_repo/versions/4c3a4acfe006_new_attr_account_table.py +65 -0
- rucio/db/sqla/migrate_repo/versions/4cf0a2e127d4_adding_transient_metadata.py +46 -0
- rucio/db/sqla/migrate_repo/versions/50280c53117c_add_qos_class_to_rse.py +47 -0
- rucio/db/sqla/migrate_repo/versions/52153819589c_add_rse_id_to_replicas_table.py +45 -0
- rucio/db/sqla/migrate_repo/versions/52fd9f4916fa_added_activity_to_rules.py +46 -0
- rucio/db/sqla/migrate_repo/versions/53b479c3cb0f_fix_did_meta_table_missing_updated_at_.py +48 -0
- rucio/db/sqla/migrate_repo/versions/5673b4b6e843_add_wfms_metadata_to_rule_tables.py +50 -0
- rucio/db/sqla/migrate_repo/versions/575767d9f89_added_source_history_table.py +59 -0
- rucio/db/sqla/migrate_repo/versions/58bff7008037_add_started_at_to_requests.py +48 -0
- rucio/db/sqla/migrate_repo/versions/58c8b78301ab_rename_callback_to_message.py +108 -0
- rucio/db/sqla/migrate_repo/versions/5f139f77382a_added_child_rule_id_column.py +57 -0
- rucio/db/sqla/migrate_repo/versions/688ef1840840_adding_did_meta_table.py +51 -0
- rucio/db/sqla/migrate_repo/versions/6e572a9bfbf3_add_new_split_container_column_to_rules.py +50 -0
- rucio/db/sqla/migrate_repo/versions/70587619328_add_comment_column_for_subscriptions.py +46 -0
- rucio/db/sqla/migrate_repo/versions/739064d31565_remove_history_table_pks.py +42 -0
- rucio/db/sqla/migrate_repo/versions/7541902bf173_add_didsfollowed_and_followevents_table.py +93 -0
- rucio/db/sqla/migrate_repo/versions/7ec22226cdbf_new_replica_state_for_temporary_.py +73 -0
- rucio/db/sqla/migrate_repo/versions/810a41685bc1_added_columns_rse_transfer_limits.py +52 -0
- rucio/db/sqla/migrate_repo/versions/83f991c63a93_correct_rse_expression_length.py +45 -0
- rucio/db/sqla/migrate_repo/versions/8523998e2e76_increase_size_of_extended_attributes_.py +46 -0
- rucio/db/sqla/migrate_repo/versions/8ea9122275b1_adding_missing_function_based_indices.py +54 -0
- rucio/db/sqla/migrate_repo/versions/90f47792bb76_add_clob_payload_to_messages.py +48 -0
- rucio/db/sqla/migrate_repo/versions/914b8f02df38_new_table_for_lifetime_model_exceptions.py +70 -0
- rucio/db/sqla/migrate_repo/versions/94a5961ddbf2_add_estimator_columns.py +48 -0
- rucio/db/sqla/migrate_repo/versions/9a1b149a2044_add_saml_identity_type.py +95 -0
- rucio/db/sqla/migrate_repo/versions/9a45bc4ea66d_add_vp_table.py +55 -0
- rucio/db/sqla/migrate_repo/versions/9eb936a81eb1_true_is_true.py +74 -0
- rucio/db/sqla/migrate_repo/versions/a118956323f8_added_vo_table_and_vo_col_to_rse.py +78 -0
- rucio/db/sqla/migrate_repo/versions/a193a275255c_add_status_column_in_messages.py +49 -0
- rucio/db/sqla/migrate_repo/versions/a5f6f6e928a7_1_7_0.py +124 -0
- rucio/db/sqla/migrate_repo/versions/a616581ee47_added_columns_to_table_requests.py +60 -0
- rucio/db/sqla/migrate_repo/versions/a6eb23955c28_state_idx_non_functional.py +53 -0
- rucio/db/sqla/migrate_repo/versions/a74275a1ad30_added_global_quota_table.py +56 -0
- rucio/db/sqla/migrate_repo/versions/a93e4e47bda_heartbeats.py +67 -0
- rucio/db/sqla/migrate_repo/versions/ae2a56fcc89_added_comment_column_to_rules.py +50 -0
- rucio/db/sqla/migrate_repo/versions/b4293a99f344_added_column_identity_to_table_tokens.py +46 -0
- rucio/db/sqla/migrate_repo/versions/b7d287de34fd_removal_of_replicastate_source.py +92 -0
- rucio/db/sqla/migrate_repo/versions/b818052fa670_add_index_to_quarantined_replicas.py +42 -0
- rucio/db/sqla/migrate_repo/versions/b8caac94d7f0_add_comments_column_for_subscriptions_.py +46 -0
- rucio/db/sqla/migrate_repo/versions/b96a1c7e1cc4_new_bad_pfns_table_and_bad_replicas_.py +147 -0
- rucio/db/sqla/migrate_repo/versions/bb695f45c04_extend_request_state.py +78 -0
- rucio/db/sqla/migrate_repo/versions/bc68e9946deb_add_staging_timestamps_to_request.py +53 -0
- rucio/db/sqla/migrate_repo/versions/bf3baa1c1474_correct_pk_and_idx_for_history_tables.py +74 -0
- rucio/db/sqla/migrate_repo/versions/c0937668555f_add_qos_policy_map_table.py +56 -0
- rucio/db/sqla/migrate_repo/versions/c129ccdb2d5_add_lumiblocknr_to_dids.py +46 -0
- rucio/db/sqla/migrate_repo/versions/ccdbcd48206e_add_did_type_column_index_on_did_meta_.py +68 -0
- rucio/db/sqla/migrate_repo/versions/cebad904c4dd_new_payload_column_for_heartbeats.py +48 -0
- rucio/db/sqla/migrate_repo/versions/d1189a09c6e0_oauth2_0_and_jwt_feature_support_adding_.py +149 -0
- rucio/db/sqla/migrate_repo/versions/d23453595260_extend_request_state_for_preparer.py +106 -0
- rucio/db/sqla/migrate_repo/versions/d6dceb1de2d_added_purge_column_to_rules.py +47 -0
- rucio/db/sqla/migrate_repo/versions/d6e2c3b2cf26_remove_third_party_copy_column_from_rse.py +45 -0
- rucio/db/sqla/migrate_repo/versions/d91002c5841_new_account_limits_table.py +105 -0
- rucio/db/sqla/migrate_repo/versions/e138c364ebd0_extending_columns_for_filter_and_.py +52 -0
- rucio/db/sqla/migrate_repo/versions/e59300c8b179_support_for_archive.py +106 -0
- rucio/db/sqla/migrate_repo/versions/f1b14a8c2ac1_postgres_use_check_constraints.py +30 -0
- rucio/db/sqla/migrate_repo/versions/f41ffe206f37_oracle_global_temporary_tables.py +75 -0
- rucio/db/sqla/migrate_repo/versions/f85a2962b021_adding_transfertool_column_to_requests_.py +49 -0
- rucio/db/sqla/migrate_repo/versions/fa7a7d78b602_increase_refresh_token_size.py +45 -0
- rucio/db/sqla/migrate_repo/versions/fb28a95fe288_add_replicas_rse_id_tombstone_idx.py +38 -0
- rucio/db/sqla/migrate_repo/versions/fe1a65b176c9_set_third_party_copy_read_and_write_.py +44 -0
- rucio/db/sqla/migrate_repo/versions/fe8ea2fa9788_added_third_party_copy_column_to_rse_.py +46 -0
- rucio/db/sqla/models.py +1834 -0
- rucio/db/sqla/sautils.py +48 -0
- rucio/db/sqla/session.py +470 -0
- rucio/db/sqla/types.py +207 -0
- rucio/db/sqla/util.py +521 -0
- rucio/rse/__init__.py +97 -0
- rucio/rse/protocols/__init__.py +14 -0
- rucio/rse/protocols/cache.py +123 -0
- rucio/rse/protocols/dummy.py +112 -0
- rucio/rse/protocols/gfal.py +701 -0
- rucio/rse/protocols/globus.py +243 -0
- rucio/rse/protocols/gsiftp.py +93 -0
- rucio/rse/protocols/http_cache.py +83 -0
- rucio/rse/protocols/mock.py +124 -0
- rucio/rse/protocols/ngarc.py +210 -0
- rucio/rse/protocols/posix.py +251 -0
- rucio/rse/protocols/protocol.py +530 -0
- rucio/rse/protocols/rclone.py +365 -0
- rucio/rse/protocols/rfio.py +137 -0
- rucio/rse/protocols/srm.py +339 -0
- rucio/rse/protocols/ssh.py +414 -0
- rucio/rse/protocols/storm.py +207 -0
- rucio/rse/protocols/webdav.py +547 -0
- rucio/rse/protocols/xrootd.py +295 -0
- rucio/rse/rsemanager.py +752 -0
- rucio/tests/__init__.py +14 -0
- rucio/tests/common.py +244 -0
- rucio/tests/common_server.py +132 -0
- rucio/transfertool/__init__.py +14 -0
- rucio/transfertool/fts3.py +1484 -0
- rucio/transfertool/globus.py +200 -0
- rucio/transfertool/globus_library.py +182 -0
- rucio/transfertool/mock.py +81 -0
- rucio/transfertool/transfertool.py +212 -0
- rucio/vcsversion.py +11 -0
- rucio/version.py +46 -0
- rucio/web/__init__.py +14 -0
- rucio/web/rest/__init__.py +14 -0
- rucio/web/rest/flaskapi/__init__.py +14 -0
- rucio/web/rest/flaskapi/authenticated_bp.py +28 -0
- rucio/web/rest/flaskapi/v1/__init__.py +14 -0
- rucio/web/rest/flaskapi/v1/accountlimits.py +234 -0
- rucio/web/rest/flaskapi/v1/accounts.py +1088 -0
- rucio/web/rest/flaskapi/v1/archives.py +100 -0
- rucio/web/rest/flaskapi/v1/auth.py +1642 -0
- rucio/web/rest/flaskapi/v1/common.py +385 -0
- rucio/web/rest/flaskapi/v1/config.py +305 -0
- rucio/web/rest/flaskapi/v1/credentials.py +213 -0
- rucio/web/rest/flaskapi/v1/dids.py +2204 -0
- rucio/web/rest/flaskapi/v1/dirac.py +116 -0
- rucio/web/rest/flaskapi/v1/export.py +77 -0
- rucio/web/rest/flaskapi/v1/heartbeats.py +129 -0
- rucio/web/rest/flaskapi/v1/identities.py +263 -0
- rucio/web/rest/flaskapi/v1/import.py +133 -0
- rucio/web/rest/flaskapi/v1/lifetime_exceptions.py +315 -0
- rucio/web/rest/flaskapi/v1/locks.py +360 -0
- rucio/web/rest/flaskapi/v1/main.py +83 -0
- rucio/web/rest/flaskapi/v1/meta.py +226 -0
- rucio/web/rest/flaskapi/v1/metrics.py +37 -0
- rucio/web/rest/flaskapi/v1/nongrid_traces.py +97 -0
- rucio/web/rest/flaskapi/v1/ping.py +89 -0
- rucio/web/rest/flaskapi/v1/redirect.py +366 -0
- rucio/web/rest/flaskapi/v1/replicas.py +1866 -0
- rucio/web/rest/flaskapi/v1/requests.py +841 -0
- rucio/web/rest/flaskapi/v1/rses.py +2204 -0
- rucio/web/rest/flaskapi/v1/rules.py +824 -0
- rucio/web/rest/flaskapi/v1/scopes.py +161 -0
- rucio/web/rest/flaskapi/v1/subscriptions.py +646 -0
- rucio/web/rest/flaskapi/v1/templates/auth_crash.html +80 -0
- rucio/web/rest/flaskapi/v1/templates/auth_granted.html +82 -0
- rucio/web/rest/flaskapi/v1/tmp_dids.py +115 -0
- rucio/web/rest/flaskapi/v1/traces.py +100 -0
- rucio/web/rest/flaskapi/v1/vos.py +280 -0
- rucio/web/rest/main.py +19 -0
- rucio/web/rest/metrics.py +28 -0
- rucio-32.8.6.data/data/rucio/etc/alembic.ini.template +71 -0
- rucio-32.8.6.data/data/rucio/etc/alembic_offline.ini.template +74 -0
- rucio-32.8.6.data/data/rucio/etc/globus-config.yml.template +5 -0
- rucio-32.8.6.data/data/rucio/etc/ldap.cfg.template +30 -0
- rucio-32.8.6.data/data/rucio/etc/mail_templates/rule_approval_request.tmpl +38 -0
- rucio-32.8.6.data/data/rucio/etc/mail_templates/rule_approved_admin.tmpl +4 -0
- rucio-32.8.6.data/data/rucio/etc/mail_templates/rule_approved_user.tmpl +17 -0
- rucio-32.8.6.data/data/rucio/etc/mail_templates/rule_denied_admin.tmpl +6 -0
- rucio-32.8.6.data/data/rucio/etc/mail_templates/rule_denied_user.tmpl +17 -0
- rucio-32.8.6.data/data/rucio/etc/mail_templates/rule_ok_notification.tmpl +19 -0
- rucio-32.8.6.data/data/rucio/etc/rse-accounts.cfg.template +25 -0
- rucio-32.8.6.data/data/rucio/etc/rucio.cfg.atlas.client.template +42 -0
- rucio-32.8.6.data/data/rucio/etc/rucio.cfg.template +257 -0
- rucio-32.8.6.data/data/rucio/etc/rucio_multi_vo.cfg.template +234 -0
- rucio-32.8.6.data/data/rucio/requirements.txt +55 -0
- rucio-32.8.6.data/data/rucio/tools/bootstrap.py +34 -0
- rucio-32.8.6.data/data/rucio/tools/merge_rucio_configs.py +147 -0
- rucio-32.8.6.data/data/rucio/tools/reset_database.py +40 -0
- rucio-32.8.6.data/scripts/rucio +2540 -0
- rucio-32.8.6.data/scripts/rucio-abacus-account +75 -0
- rucio-32.8.6.data/scripts/rucio-abacus-collection-replica +47 -0
- rucio-32.8.6.data/scripts/rucio-abacus-rse +79 -0
- rucio-32.8.6.data/scripts/rucio-admin +2434 -0
- rucio-32.8.6.data/scripts/rucio-atropos +61 -0
- rucio-32.8.6.data/scripts/rucio-auditor +199 -0
- rucio-32.8.6.data/scripts/rucio-automatix +51 -0
- rucio-32.8.6.data/scripts/rucio-bb8 +58 -0
- rucio-32.8.6.data/scripts/rucio-c3po +86 -0
- rucio-32.8.6.data/scripts/rucio-cache-client +135 -0
- rucio-32.8.6.data/scripts/rucio-cache-consumer +43 -0
- rucio-32.8.6.data/scripts/rucio-conveyor-finisher +59 -0
- rucio-32.8.6.data/scripts/rucio-conveyor-poller +67 -0
- rucio-32.8.6.data/scripts/rucio-conveyor-preparer +38 -0
- rucio-32.8.6.data/scripts/rucio-conveyor-receiver +44 -0
- rucio-32.8.6.data/scripts/rucio-conveyor-stager +77 -0
- rucio-32.8.6.data/scripts/rucio-conveyor-submitter +140 -0
- rucio-32.8.6.data/scripts/rucio-conveyor-throttler +105 -0
- rucio-32.8.6.data/scripts/rucio-dark-reaper +54 -0
- rucio-32.8.6.data/scripts/rucio-dumper +159 -0
- rucio-32.8.6.data/scripts/rucio-follower +45 -0
- rucio-32.8.6.data/scripts/rucio-hermes +55 -0
- rucio-32.8.6.data/scripts/rucio-judge-cleaner +90 -0
- rucio-32.8.6.data/scripts/rucio-judge-evaluator +138 -0
- rucio-32.8.6.data/scripts/rucio-judge-injector +45 -0
- rucio-32.8.6.data/scripts/rucio-judge-repairer +45 -0
- rucio-32.8.6.data/scripts/rucio-kronos +45 -0
- rucio-32.8.6.data/scripts/rucio-light-reaper +53 -0
- rucio-32.8.6.data/scripts/rucio-minos +54 -0
- rucio-32.8.6.data/scripts/rucio-minos-temporary-expiration +51 -0
- rucio-32.8.6.data/scripts/rucio-necromancer +121 -0
- rucio-32.8.6.data/scripts/rucio-oauth-manager +64 -0
- rucio-32.8.6.data/scripts/rucio-reaper +84 -0
- rucio-32.8.6.data/scripts/rucio-replica-recoverer +249 -0
- rucio-32.8.6.data/scripts/rucio-storage-consistency-actions +75 -0
- rucio-32.8.6.data/scripts/rucio-transmogrifier +78 -0
- rucio-32.8.6.data/scripts/rucio-undertaker +77 -0
- rucio-32.8.6.dist-info/METADATA +83 -0
- rucio-32.8.6.dist-info/RECORD +481 -0
- rucio-32.8.6.dist-info/WHEEL +5 -0
- rucio-32.8.6.dist-info/licenses/AUTHORS.rst +94 -0
- rucio-32.8.6.dist-info/licenses/LICENSE +201 -0
- rucio-32.8.6.dist-info/top_level.txt +1 -0
rucio/transfertool/fts3.py
@@ -0,0 +1,1484 @@
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import json
import logging
import pathlib
import traceback
import uuid
from collections.abc import Callable
from configparser import NoOptionError, NoSectionError
from json import loads
from typing import Any, Optional, TYPE_CHECKING
from urllib.parse import urlparse

import requests
from dogpile.cache.api import NoValue
from requests.adapters import ReadTimeout
from requests.packages.urllib3 import disable_warnings  # pylint: disable=import-error

from rucio.common.cache import make_region_memcached
from rucio.common.config import config_get, config_get_bool, config_get_int
from rucio.common.constants import FTS_JOB_TYPE, FTS_STATE, FTS_COMPLETE_STATE
from rucio.common.exception import TransferToolTimeout, TransferToolWrongAnswer, DuplicateFileTransferSubmission
from rucio.common.stopwatch import Stopwatch
from rucio.common.utils import APIEncoder, chunks, PREFERRED_CHECKSUM
from rucio.core.monitor import MetricManager
from rucio.core.oidc import get_token_for_account_operation
from rucio.core.request import get_source_rse, get_transfer_error
from rucio.core.rse import get_rse_supported_checksums_from_attributes
from rucio.db.sqla.constants import RequestState
from rucio.transfertool.transfertool import Transfertool, TransferToolBuilder, TransferStatusReport

if TYPE_CHECKING:
    from rucio.core.transfer import DirectTransferDefinition
    from rucio.core.rse import RseData

logging.getLogger("requests").setLevel(logging.CRITICAL)
disable_warnings()

REGION_SHORT = make_region_memcached(expiration_time=900)
METRICS = MetricManager(module=__name__)

SUBMISSION_COUNTER = METRICS.counter(name='{host}.submission.{state}',
                                     documentation='Number of transfers submitted', labelnames=('state', 'host'))
CANCEL_COUNTER = METRICS.counter(name='{host}.cancel.{state}',
                                 documentation='Number of cancelled transfers', labelnames=('state', 'host'))
UPDATE_PRIORITY_COUNTER = METRICS.counter(name='{host}.update_priority.{state}',
                                          documentation='Number of priority updates', labelnames=('state', 'host'))
QUERY_COUNTER = METRICS.counter(name='{host}.query.{state}',
                                documentation='Number of queried transfers', labelnames=('state', 'host'))
WHOAMI_COUNTER = METRICS.counter(name='{host}.whoami.{state}',
                                 documentation='Number of whoami requests', labelnames=('state', 'host'))
VERSION_COUNTER = METRICS.counter(name='{host}.version.{state}',
                                  documentation='Number of version requests', labelnames=('state', 'host'))
BULK_QUERY_COUNTER = METRICS.counter(name='{host}.bulk_query.{state}',
                                     documentation='Number of bulk queries', labelnames=('state', 'host'))
QUERY_DETAILS_COUNTER = METRICS.counter(name='{host}.query_details.{state}',
                                        documentation='Number of detailed status queries', labelnames=('state', 'host'))

ALLOW_USER_OIDC_TOKENS = config_get_bool('conveyor', 'allow_user_oidc_tokens', False, False)
REQUEST_OIDC_SCOPE = config_get('conveyor', 'request_oidc_scope', False, 'fts:submit-transfer')
REQUEST_OIDC_AUDIENCE = config_get('conveyor', 'request_oidc_audience', False, 'fts:example')
REWRITE_HTTPS_TO_DAVS = config_get_bool('transfers', 'rewrite_https_to_davs', default=False)
VO_CERTS_PATH = config_get('conveyor', 'vo_certs_path', False, None)

# https://fts3-docs.web.cern.ch/fts3-docs/docs/state_machine.html
FINAL_FTS_JOB_STATES = (FTS_STATE.FAILED, FTS_STATE.CANCELED, FTS_STATE.FINISHED, FTS_STATE.FINISHEDDIRTY)
FINAL_FTS_FILE_STATES = (FTS_STATE.FAILED, FTS_STATE.CANCELED, FTS_STATE.FINISHED, FTS_STATE.NOT_USED)

# In a multi-hop transfer, we must compute a checksum validation strategy valid for the whole path.
# This state-machine defines how strategies of hops are merged into a path-wide strategy.
# For example, if HOP1 supports only validation of checksum at source while HOP2 only
# supports validation at destination, the strategy for the whole path MUST be "none". Otherwise,
# transfers will fail when FTS tries to validate the checksum.
PATH_CHECKSUM_VALIDATION_STRATEGY: dict[tuple[str, str], str] = {
    ('both', 'both'): 'both',
    ('both', 'target'): 'target',
    ('both', 'source'): 'source',
    ('both', 'none'): 'none',
    ('target', 'both'): 'target',
    ('target', 'target'): 'target',
    ('target', 'source'): 'none',
    ('target', 'none'): 'none',
    ('source', 'both'): 'source',
    ('source', 'target'): 'none',
    ('source', 'source'): 'source',
    ('source', 'none'): 'none',
    ('none', 'both'): 'none',
    ('none', 'target'): 'none',
    ('none', 'source'): 'none',
    ('none', 'none'): 'none',
}
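
# Illustrative note (not part of the packaged file): merging is a plain lookup in
# the table above, applied pairwise along the path. For instance, a hop that can
# only validate at the source followed by a hop that can only validate at the
# target yields:
#
#     >>> PATH_CHECKSUM_VALIDATION_STRATEGY[('source', 'target')]
#     'none'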

_SCITAGS_NEXT_REFRESH = datetime.datetime.utcnow()
_SCITAGS_EXP_ID = None
_SCITAGS_ACTIVITY_IDS = {}


def _scitags_ids(logger: Callable[..., Any] = logging.log) -> "tuple[int | None, dict[str, int]]":
    """
    Re-fetch if needed and return the scitags ids
    """
    enabled = config_get_bool('packet-marking', 'enabled', default=False)
    if not enabled:
        return None, {}

    now = datetime.datetime.utcnow()
    global _SCITAGS_ACTIVITY_IDS
    global _SCITAGS_EXP_ID
    global _SCITAGS_NEXT_REFRESH
    if _SCITAGS_NEXT_REFRESH < now:
        exp_name = config_get('packet-marking', 'exp_name', default='')
        fetch_url = config_get('packet-marking', 'fetch_url', default='https://www.scitags.org/api.json')
        fetch_interval = config_get_int('packet-marking', 'fetch_interval', default=datetime.timedelta(hours=48).seconds)
        fetch_timeout = config_get_int('packet-marking', 'fetch_timeout', default=5)

        _SCITAGS_NEXT_REFRESH = now + datetime.timedelta(seconds=fetch_interval)

        if exp_name:
            had_exception = False
            exp_id = None
            activity_ids = {}
            try:
                result = requests.get(fetch_url, timeout=fetch_timeout)
                if result and result.status_code == 200:
                    marks = result.json()
                    for experiment in marks.get('experiments', []):
                        if experiment.get('expName') == exp_name:
                            exp_id = experiment.get('expId')
                            for activity_dict in experiment.get('activities', []):
                                activity_name = activity_dict.get('activityName')
                                activity_id = activity_dict.get('activityId')
                                if activity_name and activity_id:
                                    activity_ids[activity_name] = int(activity_id)
                            break
            except (requests.exceptions.RequestException, TypeError, ValueError):
                had_exception = True
                logger(logging.WARNING, 'Failed to fetch the scitags markings', exc_info=True)

            if had_exception:
                # Retry quicker after fetch errors
                _SCITAGS_NEXT_REFRESH = min(_SCITAGS_NEXT_REFRESH, now + datetime.timedelta(minutes=5))
            else:
                _SCITAGS_EXP_ID = exp_id
                _SCITAGS_ACTIVITY_IDS = activity_ids

    return _SCITAGS_EXP_ID, _SCITAGS_ACTIVITY_IDS
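
# Illustrative sketch (not part of the packaged file): given how _scitags_ids()
# parses the response, the fetched api.json is expected to look roughly like
# the following (names and ids below are made-up examples):
#
#     {"experiments": [{"expName": "myexp", "expId": 2,
#                       "activities": [{"activityName": "Analysis", "activityId": 3}]}]}
#
# With [packet-marking] enabled and exp_name = myexp, the function would then
# return (2, {'Analysis': 3}).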

def _pick_cert_file(vo: "Optional[str]") -> "Optional[str]":
    cert = None
    if vo:
        vo_cert = config_get('vo_certs', vo, False, None)
        if vo_cert:
            cert = vo_cert
        elif VO_CERTS_PATH:
            vo_cert = pathlib.Path(VO_CERTS_PATH) / vo
            if vo_cert.exists():
                cert = str(vo_cert)
    if not cert:
        usercert = config_get('conveyor', 'usercert', False, None)
        if usercert:
            cert = usercert
    return cert
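
# Illustrative note (not part of the packaged file): the resolution order above
# is a per-VO entry in the [vo_certs] config section first, then a file named
# after the VO under [conveyor] vo_certs_path, then the generic [conveyor]
# usercert fallback. A hypothetical multi-VO configuration could be:
#
#     [vo_certs]
#     vo1 = /etc/grid-security/vo1-cert.pem
#
#     [conveyor]
#     vo_certs_path = /etc/grid-security/vo-certs
#     usercert = /opt/rucio/etc/usercert.pem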

def _configured_source_strategy(activity: str, logger: Callable[..., Any]) -> str:
    """
    Retrieve from the configuration the source selection strategy for the given activity
    """
    try:
        default_source_strategy = config_get(section='conveyor', option='default-source-strategy')
    except (NoOptionError, NoSectionError, RuntimeError):
        default_source_strategy = 'orderly'

    try:
        activity_source_strategy = config_get(section='conveyor', option='activity-source-strategy')
        activity_source_strategy = loads(activity_source_strategy)
    except (NoOptionError, NoSectionError, RuntimeError):
        activity_source_strategy = {}
    except ValueError:
        logger(logging.WARNING, 'activity_source_strategy not properly defined')
        activity_source_strategy = {}

    return activity_source_strategy.get(str(activity), default_source_strategy)
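
# Illustrative sketch (not part of the packaged file): the option parsed above
# is a JSON object mapping activity names to strategy names, so a hypothetical
# configuration could look like (the activity name is a made-up example,
# 'orderly' is the built-in default seen above):
#
#     [conveyor]
#     default-source-strategy = orderly
#     activity-source-strategy = {"Data Consolidation": "orderly"}
#
# Activities without an entry fall back to default-source-strategy.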

def oidc_supported(transfer_hop) -> bool:
    """
    Check OIDC AuthN/Z support for the destination and source RSEs;

    for oidc_support to be activated, all sources and the destination must explicitly support it
    """
    # assumes use of boolean 'oidc_support' RSE attribute
    if not transfer_hop.dst.rse.attributes.get('oidc_support', False):
        return False

    for source in transfer_hop.sources:
        if not source.rse.attributes.get('oidc_support', False):
            return False
    return True

def _available_checksums(
        transfer: "DirectTransferDefinition",
) -> tuple[set[str], set[str]]:
    """
    Get checksums which can be used for file validation on the source and the destination RSE
    """
    src_attributes = transfer.src.rse.attributes
    if src_attributes.get('verify_checksum', True):
        src_checksums = set(get_rse_supported_checksums_from_attributes(src_attributes))
    else:
        src_checksums = set()

    dst_attributes = transfer.dst.rse.attributes
    if dst_attributes.get('verify_checksum', True):
        dst_checksums = set(get_rse_supported_checksums_from_attributes(dst_attributes))
    else:
        dst_checksums = set()

    return src_checksums, dst_checksums


def _hop_checksum_validation_strategy(
        transfer: "DirectTransferDefinition",
        logger: Callable[..., Any],
) -> tuple[str, set[str]]:
    """
    Compute the checksum validation strategy (none, source, destination or both) depending
    on available source and destination checksums for a single hop transfer
    """
    src_checksums, dst_checksums = _available_checksums(transfer)
    intersection = src_checksums.intersection(dst_checksums)

    if intersection:
        strategy, possible_checksums = 'both', intersection
    elif dst_checksums:
        # The prioritization of destination over source here is desired, not random
        logger(logging.INFO, f'No common checksum method for {transfer}. Verifying destination only.')
        strategy, possible_checksums = 'target', dst_checksums
    elif src_checksums:
        logger(logging.INFO, f'No common checksum method for {transfer}. Verifying source only.')
        strategy, possible_checksums = 'source', src_checksums
    else:
        logger(logging.INFO, f'No common checksum method for {transfer}. Not verifying source nor destination.')
        strategy, possible_checksums = 'none', set()
    return strategy, possible_checksums


def _path_checksum_validation_strategy(
        transfer_path: "list[DirectTransferDefinition]",
        logger: Callable[..., Any],
) -> str:
    """
    Compute the checksum validation strategy for the whole transfer path.
    """

    path_strategy = 'both'
    for transfer_hop in transfer_path:
        hop_strategy, _ = _hop_checksum_validation_strategy(transfer_hop, logger)

        path_strategy = PATH_CHECKSUM_VALIDATION_STRATEGY.get((path_strategy, hop_strategy), 'none')

    return path_strategy
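
# Illustrative sketch (not part of the packaged file): the path strategy is a
# left fold over the merge table, starting from 'both'. A standalone
# re-enactment with plain strings for a hypothetical three-hop path:
#
#     >>> from functools import reduce
#     >>> hop_strategies = ['both', 'source', 'target']
#     >>> reduce(lambda acc, hop: PATH_CHECKSUM_VALIDATION_STRATEGY.get((acc, hop), 'none'),
#     ...        hop_strategies, 'both')
#     'none'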

def _pick_fts_checksum(
        transfer: "DirectTransferDefinition",
        path_strategy: "str",
) -> Optional[str]:
    """
    Pick the checksum to use for validating file integrity on this particular transfer hop.
    This function will only work correctly for values of 'path_strategy' which are
    valid for the enclosing multi-hop transfer path.

    Returns the checksum as a string in the format expected by the FTS bulk submission API.
    """
    src_checksums, dst_checksums = _available_checksums(transfer)

    if path_strategy == 'both':
        possible_checksums = src_checksums.intersection(dst_checksums)
    elif path_strategy == 'target':
        possible_checksums = dst_checksums
    elif path_strategy == 'source':
        possible_checksums = src_checksums
    else:
        possible_checksums = set()

    checksum_to_use = None
    for checksum_name in possible_checksums:
        checksum_value = getattr(transfer.rws, checksum_name, '')
        if not checksum_value:
            continue

        checksum_to_use = '%s:%s' % (checksum_name.upper(), checksum_value)
        if checksum_name == PREFERRED_CHECKSUM:
            break

    return checksum_to_use
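
# Illustrative note (not part of the packaged file): the returned string uses
# the 'ALGORITHM:value' form built above. For a request whose adler32 attribute
# is '0cc737eb' (a made-up value), the function would yield 'ADLER32:0cc737eb';
# the loop prefers PREFERRED_CHECKSUM when it is present.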

def build_job_params(transfer_path, bring_online, default_lifetime, archive_timeout_override, max_time_in_queue, logger):
    """
    Prepare the job parameters which will be passed to FTS transfertool
    """

    # The last hop is the main request (the one which triggered the whole transfer),
    # so most attributes will come from it
    last_hop = transfer_path[-1]
    first_hop = transfer_path[0]

    overwrite, bring_online_local = True, None
    if first_hop.src.rse.is_tape_or_staging_required():
        # Activate bring_online if it was requested by first hop
        # We don't allow multihop via a tape, so bring_online should not be set on any other hop
        bring_online_local = bring_online
    if last_hop.dst.rse.is_tape():
        overwrite = False

    # Get dest space token
    dest_protocol = last_hop.protocol_factory.protocol(last_hop.dst.rse, last_hop.dst.scheme, last_hop.operation_dest)
    dest_spacetoken = None
    if dest_protocol.attributes and 'extended_attributes' in dest_protocol.attributes and \
            dest_protocol.attributes['extended_attributes'] and 'space_token' in dest_protocol.attributes['extended_attributes']:
        dest_spacetoken = dest_protocol.attributes['extended_attributes']['space_token']

    strict_copy = last_hop.dst.rse.attributes.get('strict_copy', False)
    archive_timeout = last_hop.dst.rse.attributes.get('archive_timeout', None)

    job_params = {'account': last_hop.rws.account,
                  'verify_checksum': _path_checksum_validation_strategy(transfer_path, logger=logger),
                  'copy_pin_lifetime': last_hop.rws.attributes.get('lifetime', default_lifetime),
                  'bring_online': bring_online_local,
                  'job_metadata': {
                      'issuer': 'rucio',
                      'multi_sources': False,
                  },
                  'overwrite': last_hop.rws.attributes.get('overwrite', overwrite),
                  'priority': last_hop.rws.priority}

    if len(transfer_path) > 1:
        job_params['multihop'] = True
        job_params['job_metadata']['multihop'] = True
    elif len(last_hop.legacy_sources) > 1:
        job_params['job_metadata']['multi_sources'] = True
    if strict_copy:
        job_params['strict_copy'] = strict_copy
    if dest_spacetoken:
        job_params['spacetoken'] = dest_spacetoken
    if last_hop.use_ipv4:
        job_params['ipv4'] = True
        job_params['ipv6'] = False

    # assume s3alternate True (path-style URL S3 RSEs)
    job_params['s3alternate'] = True
    src_rse_s3_url_style = first_hop.src.rse.attributes.get('s3_url_style', None)
    if src_rse_s3_url_style == "host":
        job_params['s3alternate'] = False
    dst_rse_s3_url_style = last_hop.dst.rse.attributes.get('s3_url_style', None)
    if dst_rse_s3_url_style == "host":
        job_params['s3alternate'] = False

    if archive_timeout and last_hop.dst.rse.is_tape():
        try:
            archive_timeout = int(archive_timeout)
            if archive_timeout_override is None:
                job_params['archive_timeout'] = archive_timeout
            elif archive_timeout_override != 0:
                job_params['archive_timeout'] = archive_timeout_override
            # FTS only supports dst_file metadata if archive_timeout is set
            job_params['dst_file_report'] = True
            logger(logging.DEBUG, 'Added archive timeout to transfer.')
        except ValueError:
            logger(logging.WARNING, 'Could not set archive_timeout for %s. Must be integer.', last_hop)
            pass
    if max_time_in_queue:
        if last_hop.rws.activity in max_time_in_queue:
            job_params['max_time_in_queue'] = max_time_in_queue[last_hop.rws.activity]
        elif 'default' in max_time_in_queue:
            job_params['max_time_in_queue'] = max_time_in_queue['default']

    overwrite_hop = True
    for transfer_hop in transfer_path[:-1]:
        # Only allow overwrite if all hops in multihop allow it
        h_overwrite = transfer_hop.rws.attributes.get('overwrite', True)
        job_params['overwrite'] = h_overwrite and job_params['overwrite']
        # Allow overwrite_hop if all intermediate hops allow it (ignoring the last hop)
        overwrite_hop = h_overwrite and overwrite_hop
    if not job_params['overwrite'] and overwrite_hop:
        job_params['overwrite_hop'] = overwrite_hop

    return job_params
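
# Illustrative sketch (not part of the packaged file): for a hypothetical
# single-hop disk-to-disk transfer with priority 3, no special RSE attributes
# and a common supported checksum on both endpoints, build_job_params() would
# return roughly (account name made up):
#
#     {'account': 'transfer_ops', 'verify_checksum': 'both',
#      'copy_pin_lifetime': None, 'bring_online': None,
#      'job_metadata': {'issuer': 'rucio', 'multi_sources': False},
#      'overwrite': True, 'priority': 3, 's3alternate': True}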

def bulk_group_transfers(transfer_paths, policy='rule', group_bulk=200, source_strategy=None, max_time_in_queue=None,
                         logger=logging.log, archive_timeout_override=None, bring_online=None, default_lifetime=None):
    """
    Group transfers in bulk based on certain criteria

    :param transfer_paths: List of transfer paths to group. Each path is a list of single-hop transfers.
    :param policy: Policy to use for grouping.
    :param group_bulk: Bulk size.
    :param source_strategy: Strategy to group sources.
    :param max_time_in_queue: Maximum time in queue.
    :param archive_timeout_override: Override the archive_timeout parameter for any transfers with it set (0 to unset).
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :return: List of grouped transfers.
    """

    grouped_transfers = {}
    fts_jobs = []

    for transfer_path in transfer_paths:
        job_params = build_job_params(
            transfer_path=transfer_path,
            bring_online=bring_online,
            default_lifetime=default_lifetime,
            archive_timeout_override=archive_timeout_override,
            max_time_in_queue=max_time_in_queue,
            logger=logger
        )
        if job_params['job_metadata'].get('multi_sources') or job_params['job_metadata'].get('multihop'):
            # for multi-hop and multi-source transfers, no bulk submission.
            fts_jobs.append({'transfers': transfer_path[0:group_bulk], 'job_params': job_params})
        else:
            # it's a single-hop, single-source transfer. Hence, a candidate for bulk submission.
            transfer = transfer_path[0]

            # we cannot group transfers together if their job_key differs
            job_key = '%s,%s,%s,%s,%s,%s,%s,%s' % (
                job_params['verify_checksum'],
                job_params.get('spacetoken', ''),
                job_params['copy_pin_lifetime'],
                job_params['bring_online'],
                job_params['job_metadata'],
                job_params['overwrite'],
                job_params['priority'],
                job_params.get('max_time_in_queue', '')
            )

            # Additionally, we don't want to group transfers together if their policy_key differs
            policy_key = ''
            if policy == 'rule':
                policy_key = '%s' % transfer.rws.rule_id
            if policy == 'dest':
                policy_key = '%s' % transfer.dst.rse.name
            if policy == 'src_dest':
                policy_key = '%s,%s' % (transfer.src.rse.name, transfer.dst.rse.name)
            if policy == 'rule_src_dest':
                policy_key = '%s,%s,%s' % (transfer.rws.rule_id, transfer.src.rse.name, transfer.dst.rse.name)
            if policy == 'activity_dest':
                policy_key = '%s %s' % (transfer.rws.activity, transfer.dst.rse.name)
                policy_key = "_".join(policy_key.split(' '))
            if policy == 'activity_src_dest':
                policy_key = '%s %s %s' % (transfer.rws.activity, transfer.src.rse.name, transfer.dst.rse.name)
                policy_key = "_".join(policy_key.split(' '))
                # maybe here we need to hash the key if it's too long

            group_key = "%s_%s" % (job_key, policy_key)
            if group_key not in grouped_transfers:
                grouped_transfers[group_key] = {'transfers': [], 'job_params': job_params}
            grouped_transfers[group_key]['transfers'].append(transfer)

    # split transfer groups to have at most group_bulk elements in each one
    for group in grouped_transfers.values():
        job_params = group['job_params']
        for transfer_paths in chunks(group['transfers'], group_bulk):
            fts_jobs.append({'transfers': transfer_paths, 'job_params': job_params})

    return fts_jobs
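
# Illustrative note (not part of the packaged file): with policy='dest' and
# group_bulk=2, three single-hop transfers towards the same destination RSE
# with identical job parameters come back as two FTS jobs - one holding two
# transfers and one holding the remainder - while every multi-hop or
# multi-source path always gets a job of its own.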

class Fts3TransferStatusReport(TransferStatusReport):

    supported_db_fields = [
        'state',
        'external_id',
        'started_at',
        'transferred_at',
        'staging_started_at',
        'staging_finished_at',
        'source_rse_id',
        'err_msg',
        'attributes',
    ]

    def __init__(self, external_host, request_id, request=None):
        super().__init__(request_id, request=request)
        self.external_host = external_host

        # Initialized in child class constructors:
        self._transfer_id = None
        self._file_metadata = {}
        self._multi_sources = None
        self._src_url = None
        self._dst_url = None
        # Initialized in child class initialize():
        self._reason = None
        self._src_rse = None
        self._fts_address = self.external_host
        # Supported db fields below:
        self.state = None
        self.external_id = None
        self.started_at = None
        self.transferred_at = None
        self.staging_started_at = None
        self.staging_finished_at = None
        self.source_rse_id = None
        self.err_msg = None
        self.attributes = None

    def __str__(self):
        return f'Transfer {self._transfer_id} of {self._file_metadata["scope"]}:{self._file_metadata["name"]} ' \
               f'{self._file_metadata["src_rse"]} --({self._file_metadata["request_id"]})-> {self._file_metadata["dst_rse"]}'

    def initialize(self, session, logger=logging.log):
        raise NotImplementedError(f"{self.__class__.__name__} is abstract and shouldn't be used directly")

    def get_monitor_msg_fields(self, session, logger=logging.log):
        self.ensure_initialized(session, logger)
        fields = {
            'transfer_link': self._transfer_link(),
            'reason': self._reason,
            'src-type': self._file_metadata.get('src_type'),
            'src-rse': self._src_rse,
            'src-url': self._src_url,
            'dst-type': self._file_metadata.get('src_type'),
            'dst-rse': self._file_metadata.get('dst_rse'),
            'dst-url': self._dst_url,
            'started_at': self.started_at,
            'transferred_at': self.transferred_at,
        }
        return fields

    def _transfer_link(self):
        return '%s/fts3/ftsmon/#/job/%s' % (self._fts_address.replace('8446', '8449'), self._transfer_id)
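
    # Illustrative note (not part of the packaged file): _transfer_link() swaps
    # the FTS REST port for the ftsmon port, so a hypothetical external host
    # 'https://fts3.example.org:8446' and transfer id 'abc-123' would give
    # 'https://fts3.example.org:8449/fts3/ftsmon/#/job/abc-123'.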

    def _find_attribute_updates(self, request, new_state, reason, overwrite_corrupted_files):
        attributes = None
        if new_state == RequestState.FAILED and 'Destination file exists and overwrite is not enabled' in (reason or ''):
            dst_file = self._file_metadata.get('dst_file', {})
            if self._dst_file_set_and_file_corrupted(request, dst_file):
                if overwrite_corrupted_files:
                    attributes = request['attributes']
                    attributes['overwrite'] = True
        return attributes

    def _find_used_source_rse(self, session, logger):
        """
        For multi-source transfers, FTS has a choice between multiple sources.
        Find which of the possible sources FTS actually used for the transfer.
        """
        meta_rse_name = self._file_metadata.get('src_rse', None)
        meta_rse_id = self._file_metadata.get('src_rse_id', None)
        request_id = self._file_metadata.get('request_id', None)

        if self._multi_sources and self._src_url:
            rse_name, rse_id = get_source_rse(request_id, self._src_url, session=session)
            if rse_name and rse_name != meta_rse_name:
                logger(logging.DEBUG, 'Correct RSE: %s for source surl: %s' % (rse_name, self._src_url))
                return rse_name, rse_id

        return meta_rse_name, meta_rse_id

    @staticmethod
    def _dst_file_set_and_file_corrupted(request, dst_file):
        """
        Returns True if the `dst_file` dict returned by FTS was filled and its content is
        sufficient to conclude that the file is corrupted.
        """
        if (request and dst_file and (
                dst_file.get('file_size') is not None and dst_file['file_size'] != request.get('bytes')
                or dst_file.get('checksum_type', '').lower() == 'adler32' and dst_file.get('checksum_value') != request.get('adler32')
                or dst_file.get('checksum_type', '').lower() == 'md5' and dst_file.get('checksum_value') != request.get('md5'))):
            return True
        return False

    @staticmethod
    def _dst_file_set_and_file_correct(request, dst_file):
        """
        Returns True if the `dst_file` dict returned by FTS was filled and its content is
        sufficient to conclude that the file is correct.
        """
        if (request and dst_file
                and dst_file.get('file_size')
                and dst_file.get('file_size') == request.get('bytes')
                and (dst_file.get('checksum_type', '').lower() == 'adler32' and dst_file.get('checksum_value') == request.get('adler32')
                     or dst_file.get('checksum_type', '').lower() == 'md5' and dst_file.get('checksum_value') == request.get('md5'))):
            return True
        return False
|
|
606
|
+
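    # For illustration only (hypothetical values): a dst_file report such as
    #   {'file_size': 1048576, 'checksum_type': 'ADLER32', 'checksum_value': '8b2e01a1', 'file_on_tape': False}
    # is considered "correct" when the request carries bytes=1048576 and
    # adler32='8b2e01a1', and "corrupted" when either value disagrees.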
    @classmethod
    def _is_recoverable_fts_overwrite_error(cls, request: dict[str, Any], reason: str,
                                            file_metadata: dict[str, Any]) -> bool:
        """
        Verify the special case when FTS cannot copy a file because the destination exists
        and overwrite is disabled, but the destination file is actually correct.

        This can happen when a transient error occurred during a previous submission attempt.
        In that case, the transfer was correctly executed by FTS, but rucio doesn't know about it.

        Returns True when the request must be marked as successful even if it was reported
        as failed by FTS.
        """
        if not request or not file_metadata:
            return False
        dst_file = file_metadata.get('dst_file', {})
        dst_type = file_metadata.get('dst_type', None)
        METRICS.counter('overwrite.check.{rsetype}.{rse}').labels(rse=file_metadata["dst_rse"], rsetype=dst_type).inc()

        if 'Destination file exists and overwrite is not enabled' in (reason or ''):
            if cls._dst_file_set_and_file_correct(request, dst_file):
                if dst_type == 'DISK' or dst_file.get('file_on_tape'):
                    METRICS.counter('overwrite.ok.{rsetype}.{rse}').labels(rse=file_metadata["dst_rse"], rsetype=dst_type).inc()
                    return True

        METRICS.counter('overwrite.fail.{rsetype}.{rse}').labels(rse=file_metadata["dst_rse"], rsetype=dst_type).inc()
        return False
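    # The extra dst_type check above is deliberate: on a DISK destination a correct
    # replica is enough, while on a TAPE destination the report must also confirm
    # that the file made it to tape (file_on_tape) before the request is declared done.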
class FTS3CompletionMessageTransferStatusReport(Fts3TransferStatusReport):
    """
    Parses FTS completion messages received via the message queue.
    """
    def __init__(self, external_host, request_id, fts_message):
        super().__init__(external_host=external_host, request_id=request_id)

        self.fts_message = fts_message

        self._transfer_id = fts_message.get('tr_id').split("__")[-1]

        self._file_metadata = fts_message['file_metadata']
        self._multi_sources = str(fts_message.get('job_metadata', {}).get('multi_sources', '')).lower() == 'true'
        self._src_url = fts_message.get('src_url', None)
        self._dst_url = fts_message.get('dst_url', None)

    def initialize(self, session, logger=logging.log):

        fts_message = self.fts_message
        request_id = self.request_id

        reason = fts_message.get('t__error_message', None)
        # job_state = fts_message.get('t_final_transfer_state', None)
        new_state = None
        if str(fts_message['t_final_transfer_state']) == FTS_COMPLETE_STATE.OK and not fts_message.get('is_archiving'):  # pylint:disable=no-member
            new_state = RequestState.DONE
        elif str(fts_message['t_final_transfer_state']) == FTS_COMPLETE_STATE.ERROR:
            request = self.request(session)
            if self._is_recoverable_fts_overwrite_error(request, reason, self._file_metadata):  # pylint:disable=no-member
                new_state = RequestState.DONE
            else:
                new_state = RequestState.FAILED

        transfer_id = self._transfer_id
        if new_state:
            request = self.request(session)
            if not request:
                logger(logging.WARNING, '%s: no request with this id in the database. Skipping. external_id: %s (%s). new_state: %s', request_id, transfer_id, self.external_host, new_state)
                return
            if request['external_id'] == transfer_id and request['state'] != new_state:
                src_rse_name, src_rse_id = self._find_used_source_rse(session, logger)

                self._reason = reason
                self._src_rse = src_rse_name
                self._fts_address = request['external_host'] or self._fts_address

                self.state = new_state
                self.external_id = transfer_id
                # the tr_timestamp_* fields in FTS completion messages are epoch timestamps in milliseconds
                self.started_at = datetime.datetime.utcfromtimestamp(float(fts_message.get('tr_timestamp_start', 0)) / 1000)
                self.transferred_at = datetime.datetime.utcfromtimestamp(float(fts_message.get('tr_timestamp_complete', 0)) / 1000)
                self.staging_started_at = None
                self.staging_finished_at = None
                self.source_rse_id = src_rse_id
                self.err_msg = get_transfer_error(self.state, reason)
                if self.err_msg and self._file_metadata.get('src_type') == "TAPE":
                    self.err_msg = '[TAPE SOURCE] ' + self.err_msg
                self.attributes = self._find_attribute_updates(
                    request=request,
                    new_state=new_state,
                    reason=reason,
                    overwrite_corrupted_files=config_get_bool('transfers', 'overwrite_corrupted_files', default=False, session=session),
                )
            elif request['external_id'] != transfer_id:
                logger(logging.WARNING, "Response %s with transfer id %s is different from the request transfer id %s, will not update" % (request_id, transfer_id, request['external_id']))
            else:
                logger(logging.DEBUG, "Request %s is already in %s state, will not update" % (request_id, new_state))
        else:
            logger(logging.DEBUG, "No state change computed for %s. Skipping request update." % request_id)
class FTS3ApiTransferStatusReport(Fts3TransferStatusReport):
    """
    Parses FTS API responses.
    """
    def __init__(self, external_host, request_id, job_response, file_response, request=None):
        super().__init__(external_host=external_host, request_id=request_id, request=request)

        self.job_response = job_response
        self.file_response = file_response

        self._transfer_id = job_response.get('job_id')

        self._file_metadata = file_response['file_metadata']
        self._multi_sources = str(job_response['job_metadata'].get('multi_sources', '')).lower() == 'true'
        self._src_url = file_response.get('source_surl', None)
        self._dst_url = file_response.get('dest_surl', None)
        self.logger = logging.log

    def initialize(self, session, logger=logging.log):

        self.logger = logger
        job_response = self.job_response
        file_response = self.file_response
        request_id = self.request_id

        file_state = file_response['file_state']
        reason = file_response.get('reason', None)

        new_state = None
        job_state = job_response.get('job_state', None)
        multi_hop = job_response.get('job_type') == FTS_JOB_TYPE.MULTI_HOP
        job_state_is_final = job_state in FINAL_FTS_JOB_STATES
        file_state_is_final = file_state in FINAL_FTS_FILE_STATES
        if file_state_is_final:
            if file_state == FTS_STATE.FINISHED:
                new_state = RequestState.DONE
            elif file_state == FTS_STATE.FAILED and job_state_is_final or \
                    file_state == FTS_STATE.FAILED and not self._multi_sources:  # for multi-source transfers we must wait for the job to be in a final state
                if self._is_recoverable_fts_overwrite_error(self.request(session), reason, self._file_metadata):
                    new_state = RequestState.DONE
                else:
                    new_state = RequestState.FAILED
            elif job_state_is_final and file_state == FTS_STATE.CANCELED:
                new_state = RequestState.FAILED
            elif job_state_is_final and file_state == FTS_STATE.NOT_USED:
                if job_state == FTS_STATE.FINISHED:
                    # it is a multi-source transfer: this source wasn't used, but another one was successful
                    new_state = RequestState.DONE
                else:
                    # failed multi-source or multi-hop (a successful multi-hop cannot have unused sources)
                    new_state = RequestState.FAILED
                    if not reason and multi_hop:
                        reason = 'Unused hop in multi-hop'

        transfer_id = self._transfer_id
        if new_state:
            request = self.request(session)
            if not request:
                logger(logging.WARNING, '%s: no request with this id in the database. Skipping. external_id: %s (%s). new_state: %s', request_id, transfer_id, self.external_host, new_state)
                return
            if request['external_id'] == transfer_id and request['state'] != new_state:
                src_rse_name, src_rse_id = self._find_used_source_rse(session, logger)

                self._reason = reason
                self._src_rse = src_rse_name

                self.state = new_state
                self.external_id = transfer_id
                self.started_at = datetime.datetime.strptime(file_response['start_time'], '%Y-%m-%dT%H:%M:%S') if file_response['start_time'] else None
                self.transferred_at = datetime.datetime.strptime(file_response['finish_time'], '%Y-%m-%dT%H:%M:%S') if file_response['finish_time'] else None
                self.staging_started_at = datetime.datetime.strptime(file_response['staging_start'], '%Y-%m-%dT%H:%M:%S') if file_response['staging_start'] else None
                self.staging_finished_at = datetime.datetime.strptime(file_response['staging_finished'], '%Y-%m-%dT%H:%M:%S') if file_response['staging_finished'] else None
                self.source_rse_id = src_rse_id
                self.err_msg = get_transfer_error(self.state, reason)
                if self.err_msg and self._file_metadata.get('src_type') == "TAPE":
                    self.err_msg = '[TAPE SOURCE] ' + self.err_msg
                self.attributes = self._find_attribute_updates(
                    request=request,
                    new_state=new_state,
                    reason=reason,
                    overwrite_corrupted_files=config_get_bool('transfers', 'overwrite_corrupted_files', default=False, session=session),
                )
            elif request['external_id'] != transfer_id:
                logger(logging.WARNING, "Response %s with transfer id %s is different from the request transfer id %s, will not update" % (request_id, transfer_id, request['external_id']))
            else:
                logger(logging.DEBUG, "Request %s is already in %s state, will not update" % (request_id, new_state))
class FTS3Transfertool(Transfertool):
    """
    FTS3 implementation of a Rucio transfertool.
    """

    external_name = 'fts3'
    required_rse_attrs = ('fts', )

    def __init__(self, external_host, oidc_account=None, vo=None, group_bulk=1, group_policy='rule', source_strategy=None,
                 max_time_in_queue=None, bring_online=43200, default_lifetime=172800, archive_timeout_override=None,
                 logger=logging.log):
        """
        Initializes the transfertool.

        :param external_host: The external host where the transfertool API is running.
        :param oidc_account:  Optional OIDC account to use for submission.
        """
        super().__init__(external_host, logger)

        self.group_policy = group_policy
        self.group_bulk = group_bulk
        self.source_strategy = source_strategy
        self.max_time_in_queue = max_time_in_queue or {}
        self.bring_online = bring_online
        self.default_lifetime = default_lifetime
        self.archive_timeout_override = archive_timeout_override

        # token for the OAuth 2.0 OIDC authorization scheme (working only with dCache + davs/https protocols as of Sep 2019)
        self.token = None
        if oidc_account:
            getadmintoken = False
            if ALLOW_USER_OIDC_TOKENS is False:
                getadmintoken = True
            self.logger(logging.DEBUG, 'Attempting to get a token for account %s. Admin token option set to %s' % (oidc_account, getadmintoken))
            # find the appropriate OIDC token and exchange it (for user accounts) if necessary
            token_dict = get_token_for_account_operation(oidc_account, req_audience=REQUEST_OIDC_AUDIENCE, req_scope=REQUEST_OIDC_SCOPE, admin=getadmintoken)
            if token_dict is not None:
                self.logger(logging.DEBUG, 'Access token has been granted.')
                if 'token' in token_dict:
                    self.logger(logging.DEBUG, 'Access token used as transfer token.')
                    self.token = token_dict['token']

        self.deterministic_id = config_get_bool('conveyor', 'use_deterministic_id', False, False)
        self.headers = {'Content-Type': 'application/json'}
        if self.external_host.startswith('https://'):
            if self.token:
                self.cert = None
                self.verify = False
                self.headers['Authorization'] = 'Bearer ' + self.token
            else:
                cert = _pick_cert_file(vo=vo)
                self.cert = (cert, cert)
                self.verify = False
        else:
            self.cert = None
            self.verify = True  # True is the default setting of a requests.* method

        self.scitags_exp_id, self.scitags_activity_ids = _scitags_ids(logger=logger)
    @classmethod
    def _pick_fts_servers(cls, source_rse: "RseData", dest_rse: "RseData"):
        """
        Pick the FTS servers to use for submission between the two given RSEs.
        """
        source_servers = source_rse.attributes.get('fts', None)
        dest_servers = dest_rse.attributes.get('fts', None)
        if source_servers is None or dest_servers is None:
            return None

        servers_to_use = dest_servers
        if source_rse.attributes.get('sign_url', None) == 'gcs':
            servers_to_use = source_servers

        return servers_to_use.split(',')
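    # The 'fts' RSE attribute is expected to hold a comma-separated list of endpoints;
    # with hypothetical hosts, fts='https://fts-a.example.org:8446,https://fts-b.example.org:8446'
    # would yield ['https://fts-a.example.org:8446', 'https://fts-b.example.org:8446'].
    # The destination's servers win by default; the source's servers are preferred only
    # when the source RSE signs its URLs via GCS.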
    @classmethod
    def can_perform_transfer(cls, source_rse: "RseData", dest_rse: "RseData"):
        if cls._pick_fts_servers(source_rse, dest_rse):
            return True
        return False

    @classmethod
    def submission_builder_for_path(cls, transfer_path, logger=logging.log):
        vo = None
        if config_get_bool('common', 'multi_vo', False, None):
            vo = transfer_path[-1].rws.scope.vo

        sub_path = []
        fts_hosts = []
        for hop in transfer_path:
            hosts = cls._pick_fts_servers(hop.src.rse, hop.dst.rse)
            if hosts:
                fts_hosts = hosts
                sub_path.append(hop)
            else:
                break

        if len(sub_path) < len(transfer_path):
            logger(logging.INFO, 'FTS3Transfertool can only submit {} hops from {}'.format(len(sub_path), [str(hop) for hop in transfer_path]))

        if sub_path:
            oidc_account = None
            if all(oidc_supported(t) for t in sub_path):
                logger(logging.DEBUG, 'OAuth2/OIDC available for transfer {}'.format([str(hop) for hop in sub_path]))
                oidc_account = transfer_path[-1].rws.account
            return sub_path, TransferToolBuilder(cls, external_host=fts_hosts[0], oidc_account=oidc_account, vo=vo)
        else:
            return [], None
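    # Reading of the contract above: callers get back the (possibly truncated) prefix
    # of hops this transfertool can handle, plus a builder pre-bound to the first
    # usable FTS host, or ([], None) when not even the first hop is submittable.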
    def group_into_submit_jobs(self, transfer_paths):
        jobs = bulk_group_transfers(
            transfer_paths,
            policy=self.group_policy,
            group_bulk=self.group_bulk,
            source_strategy=self.source_strategy,
            max_time_in_queue=self.max_time_in_queue,
            bring_online=self.bring_online,
            default_lifetime=self.default_lifetime,
            archive_timeout_override=self.archive_timeout_override,
            logger=self.logger,
        )
        return jobs
    def _file_from_transfer(self, transfer, job_params):
        rws = transfer.rws
        checksum_to_use = _pick_fts_checksum(transfer, path_strategy=job_params['verify_checksum'])
        t_file = {
            'sources': [s[1] for s in transfer.legacy_sources],
            'destinations': [transfer.dest_url],
            'metadata': {
                'request_id': rws.request_id,
                'scope': rws.scope,
                'name': rws.name,
                'activity': rws.activity,
                'request_type': rws.request_type,
                'src_type': "TAPE" if transfer.src.rse.is_tape_or_staging_required() else 'DISK',
                'dst_type': "TAPE" if transfer.dst.rse.is_tape() else 'DISK',
                'src_rse': transfer.src.rse.name,
                'dst_rse': transfer.dst.rse.name,
                'src_rse_id': transfer.src.rse.id,
                'dest_rse_id': transfer.dst.rse.id,
                'filesize': rws.byte_count,
                'md5': rws.md5,
                'adler32': rws.adler32
            },
            'filesize': rws.byte_count,
            'checksum': checksum_to_use,
            'selection_strategy': self.source_strategy if self.source_strategy else _configured_source_strategy(transfer.rws.activity, logger=self.logger),
            'activity': rws.activity
        }
        if isinstance(self.scitags_exp_id, int):
            activity_id = self.scitags_activity_ids.get(rws.activity)
            if isinstance(activity_id, int):
                t_file['scitag'] = self.scitags_exp_id << 6 | activity_id
        return t_file
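    # The scitag packing above keeps the experiment identifier in the upper bits and
    # the 6-bit activity identifier in the lower bits. With hypothetical values
    # exp_id=2 and activity_id=5, the resulting mark would be 2 << 6 | 5 == 133.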
    def submit(self, transfers, job_params, timeout=None):
        """
        Submit transfers to FTS3 via JSON.

        :param transfers:  List of transfers to submit.
        :param job_params: Dictionary containing key/value pairs, for all transfers.
        :param timeout:    Timeout in seconds.
        :returns:          FTS transfer identifier.
        """
        files = []
        for transfer in transfers:
            files.append(self._file_from_transfer(transfer, job_params))

        for transfer_file in files:
            if not transfer_file['sources']:
                raise Exception('No sources defined')

            # FTS3 expects 'davs' as the scheme identifier instead of https
            # TODO: remove the following logic in rucio 1.31
            if REWRITE_HTTPS_TO_DAVS:
                new_src_urls = []
                new_dst_urls = []
                for url in transfer_file['sources']:
                    if url.startswith('https'):
                        new_src_urls.append(':'.join(['davs'] + url.split(':')[1:]))
                    else:
                        new_src_urls.append(url)
                for url in transfer_file['destinations']:
                    if url.startswith('https'):
                        new_dst_urls.append(':'.join(['davs'] + url.split(':')[1:]))
                    else:
                        new_dst_urls.append(url)

                transfer_file['sources'] = new_src_urls
                transfer_file['destinations'] = new_dst_urls
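                # e.g. (hypothetical URL) https://storage.example.org:443/path/file
                # is rewritten to davs://storage.example.org:443/path/file; only the
                # scheme changes, host, port and path are preserved.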
        transfer_id = None
        expected_transfer_id = None
        if self.deterministic_id:
            job_params = job_params.copy()
            job_params["id_generator"] = "deterministic"
            job_params["sid"] = files[0]['metadata']['request_id']
            expected_transfer_id = self.__get_deterministic_id(job_params["sid"])
            self.logger(logging.DEBUG, "Submit bulk transfers in deterministic mode, sid %s, expected transfer id: %s", job_params["sid"], expected_transfer_id)

        # bulk submission
        params_dict = {'files': files, 'params': job_params}
        params_str = json.dumps(params_dict, cls=APIEncoder)

        post_result = None
        stopwatch = Stopwatch()
        try:
            post_result = requests.post('%s/jobs' % self.external_host,
                                        verify=self.verify,
                                        cert=self.cert,
                                        data=params_str,
                                        headers=self.headers,
                                        timeout=timeout)
            labels = {'host': self.__extract_host(self.external_host)}
            METRICS.timer('submit_transfer.{host}').labels(**labels).observe(stopwatch.elapsed / (len(files) or 1))
        except ReadTimeout as error:
            raise TransferToolTimeout(error)
        except json.JSONDecodeError as error:
            raise TransferToolWrongAnswer(error)
        except Exception as error:
            self.logger(logging.WARNING, 'Could not submit transfer to %s - %s' % (self.external_host, str(error)))

        if post_result and post_result.status_code == 200:
            SUBMISSION_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc(len(files))
            transfer_id = str(post_result.json()['job_id'])
        elif post_result and post_result.status_code == 409:
            SUBMISSION_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc(len(files))
            raise DuplicateFileTransferSubmission()
        else:
            if expected_transfer_id:
                transfer_id = expected_transfer_id
                self.logger(logging.WARNING, "Failed to submit transfer to %s, will use expected transfer id %s, error: %s", self.external_host, transfer_id, post_result.text if post_result is not None else post_result)
            else:
                self.logger(logging.WARNING, "Failed to submit transfer to %s, error: %s", self.external_host, post_result.text if post_result is not None else post_result)
            SUBMISSION_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc(len(files))

        if not transfer_id:
            raise TransferToolWrongAnswer('No transfer id returned by %s' % self.external_host)
        METRICS.timer('submit_transfers_fts3').observe(stopwatch.elapsed / (len(transfers) or 1))
        return transfer_id
    def cancel(self, transfer_ids, timeout=None):
        """
        Cancel transfers that have been submitted to FTS3.

        :param transfer_ids: FTS transfer identifiers as list of strings.
        :param timeout:      Timeout in seconds.
        :returns:            True if cancellation was successful.
        """

        if len(transfer_ids) > 1:
            raise NotImplementedError('Bulk cancelling not implemented')
        transfer_id = transfer_ids[0]

        job = requests.delete('%s/jobs/%s' % (self.external_host, transfer_id),
                              verify=self.verify,
                              cert=self.cert,
                              headers=self.headers,
                              timeout=timeout)

        if job and job.status_code == 200:
            CANCEL_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
            return job.json()

        CANCEL_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
        raise Exception('Could not cancel transfer: %s' % job.content)
    def update_priority(self, transfer_id, priority, timeout=None):
        """
        Update the priority of a transfer that has been submitted to FTS via JSON.

        :param transfer_id: FTS transfer identifier as a string.
        :param priority:    FTS job priority as an integer from 1 to 5.
        :param timeout:     Timeout in seconds.
        :returns:           True if the update was successful.
        """

        params_dict = {"params": {"priority": priority}}
        params_str = json.dumps(params_dict, cls=APIEncoder)

        job = requests.post('%s/jobs/%s' % (self.external_host, transfer_id),
                            verify=self.verify,
                            data=params_str,
                            cert=self.cert,
                            headers=self.headers,
                            timeout=timeout)  # TODO: set to 3 in conveyor

        if job and job.status_code == 200:
            UPDATE_PRIORITY_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
            return job.json()

        UPDATE_PRIORITY_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
        raise Exception('Could not update priority of transfer: %s' % job.content)
    def query(self, transfer_ids, details=False, timeout=None):
        """
        Query the status of a transfer in FTS3 via JSON.

        :param transfer_ids: FTS transfer identifiers as list of strings.
        :param details:      Switch if detailed information should be listed.
        :param timeout:      Timeout in seconds.
        :returns:            Transfer status information as a list of dictionaries.
        """

        if len(transfer_ids) > 1:
            raise NotImplementedError('FTS3 transfertool query not bulk ready')

        transfer_id = transfer_ids[0]
        if details:
            return self.__query_details(transfer_id=transfer_id)

        job = requests.get('%s/jobs/%s' % (self.external_host, transfer_id),
                           verify=self.verify,
                           cert=self.cert,
                           headers=self.headers,
                           timeout=timeout)  # TODO: set to 5 in conveyor
        if job and job.status_code == 200:
            QUERY_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
            return [job.json()]

        QUERY_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
        raise Exception('Could not retrieve transfer information: %s' % job.content)
    # Public methods, not part of the common interface specification (FTS3 specific)

    def whoami(self):
        """
        Returns credential information from the FTS3 server.

        :returns: Credentials as stored by the FTS3 server as a dictionary.
        """

        get_result = requests.get('%s/whoami' % self.external_host,
                                  verify=self.verify,
                                  cert=self.cert,
                                  headers=self.headers)

        if get_result and get_result.status_code == 200:
            WHOAMI_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
            return get_result.json()

        WHOAMI_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
        raise Exception('Could not retrieve credentials: %s' % get_result.content)
    def version(self):
        """
        Returns FTS3 server information.

        :returns: FTS3 server information as a dictionary.
        """

        get_result = requests.get('%s/' % self.external_host,
                                  verify=self.verify,
                                  cert=self.cert,
                                  headers=self.headers)

        if get_result and get_result.status_code == 200:
            VERSION_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
            return get_result.json()

        VERSION_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
        raise Exception('Could not retrieve version: %s' % get_result.content)
    def bulk_query(self, requests_by_eid, timeout=None):
        """
        Query the status of a bulk of transfers in FTS3 via JSON.

        :param requests_by_eid: Dictionary {external_id1: {request_id1: request1, ...}, ...} of requests to be queried.
        :returns:               Transfer status information as a dictionary.
        """

        responses = {}
        fts_session = requests.Session()
        xfer_ids = ','.join(requests_by_eid)
        jobs = fts_session.get('%s/jobs/%s?files=file_state,dest_surl,finish_time,start_time,staging_start,staging_finished,reason,source_surl,file_metadata' % (self.external_host, xfer_ids),
                               verify=self.verify,
                               cert=self.cert,
                               headers=self.headers,
                               timeout=timeout)

        if jobs is None:
            BULK_QUERY_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
            for transfer_id in requests_by_eid:
                responses[transfer_id] = Exception('Transfer information returns None: %s' % jobs)
        elif jobs.status_code in (200, 207, 404):
            try:
                BULK_QUERY_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
                jobs_response = jobs.json()
                responses = self.__bulk_query_responses(jobs_response, requests_by_eid)
            except ReadTimeout as error:
                raise TransferToolTimeout(error)
            except json.JSONDecodeError as error:
                raise TransferToolWrongAnswer(error)
            except Exception as error:
                raise Exception("Failed to parse the job response: %s, error: %s" % (str(jobs), str(error)))
        else:
            BULK_QUERY_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
            for transfer_id in requests_by_eid:
                responses[transfer_id] = Exception('Could not retrieve transfer information: %s' % jobs.content)

        return responses
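    # Shape of the returned mapping, as built by __bulk_query_responses below:
    # external transfer id -> {request_id: FTS3ApiTransferStatusReport} for resolved jobs,
    # {} for multi-source jobs still in flight, None for jobs FTS no longer knows about,
    # and an Exception instance when the per-job HTTP status was unexpected.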
    def list_se_status(self):
        """
        Get the list of banned Storage Elements.

        :returns: Detailed dictionary of banned Storage Elements.
        """

        try:
            result = requests.get('%s/ban/se' % self.external_host,
                                  verify=self.verify,
                                  cert=self.cert,
                                  headers=self.headers,
                                  timeout=None)
        except Exception as error:
            raise Exception('Could not retrieve transfer information: %s' % error)
        if result and result.status_code == 200:
            return result.json()
        raise Exception('Could not retrieve transfer information: %s' % result.content)
    def get_se_config(self, storage_element):
        """
        Get the JSON response for the configuration of a storage element.

        :param storage_element: The storage element you want the configuration for.
        :returns:               A JSON result for the configuration of the storage element.
        """

        result = None
        try:
            result = requests.get('%s/config/se' % (self.external_host),
                                  verify=self.verify,
                                  cert=self.cert,
                                  headers=self.headers,
                                  timeout=None)
        except Exception:
            self.logger(logging.WARNING, 'Could not get config of %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
        if result and result.status_code == 200:
            config_se = result.json()[storage_element]
            return config_se
        raise Exception('Could not get the configuration of %s, status code returned: %s' % (storage_element, result.status_code if result else None))
    def set_se_config(self, storage_element, inbound_max_active=None, outbound_max_active=None, inbound_max_throughput=None, outbound_max_throughput=None, staging=None):
        """
        Set the configuration for a storage element. Used for alleviating transfer failures due to timeouts.

        :param storage_element:         The storage element to be configured.
        :param inbound_max_active:      The integer to set the inbound_max_active for the SE.
        :param outbound_max_active:     The integer to set the outbound_max_active for the SE.
        :param inbound_max_throughput:  The float to set the inbound_max_throughput for the SE.
        :param outbound_max_throughput: The float to set the outbound_max_throughput for the SE.
        :param staging:                 The integer to set the staging for the operation of the SE.
        :returns:                       JSON post response in case of success, otherwise raise Exception.
        """

        params_dict = {storage_element: {'operations': {}, 'se_info': {}}}
        if staging is not None:
            policy = None
            try:
                policy = config_get('policy', 'permission')
            except Exception:
                self.logger(logging.WARNING, 'Could not get policy from config')
            params_dict[storage_element]['operations'] = {policy: {'staging': staging}}
        # 'se_info' is pre-initialized above, so the individual settings can simply be assigned
        if inbound_max_active is not None:
            params_dict[storage_element]['se_info']['inbound_max_active'] = inbound_max_active
        if outbound_max_active is not None:
            params_dict[storage_element]['se_info']['outbound_max_active'] = outbound_max_active
        if inbound_max_throughput is not None:
            params_dict[storage_element]['se_info']['inbound_max_throughput'] = inbound_max_throughput
        if outbound_max_throughput is not None:
            params_dict[storage_element]['se_info']['outbound_max_throughput'] = outbound_max_throughput

        params_str = json.dumps(params_dict, cls=APIEncoder)

        result = None
        try:
            result = requests.post('%s/config/se' % (self.external_host),
                                   verify=self.verify,
                                   cert=self.cert,
                                   data=params_str,
                                   headers=self.headers,
                                   timeout=None)
        except Exception:
            self.logger(logging.WARNING, 'Could not set the config of %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
        if result and result.status_code == 200:
            config_se = result.json()
            return config_se
        raise Exception('Could not set the configuration of %s, status code returned: %s' % (storage_element, result.status_code if result else None))
    def set_se_status(self, storage_element, message, ban=True, timeout=None):
        """
        Ban a Storage Element. Used when a site is in downtime.
        One can use a timeout in seconds; in that case the jobs will wait before being canceled.
        If no timeout is specified, the jobs are canceled immediately.

        :param storage_element: The Storage Element that will be banned.
        :param message:         The reason for the ban.
        :param ban:             Boolean. If set to True, ban the SE; if set to False, unban the SE.
        :param timeout:         If None, send FTS the status 'cancel', else 'waiting' plus the corresponding timeout.

        :returns: 0 in case of success, otherwise raise Exception.
        """

        params_dict = {'storage': storage_element, 'message': message}
        status = 'CANCEL'
        if timeout:
            params_dict['timeout'] = timeout
            status = 'WAIT'
        params_dict['status'] = status
        params_str = json.dumps(params_dict, cls=APIEncoder)

        result = None
        if ban:
            try:
                result = requests.post('%s/ban/se' % self.external_host,
                                       verify=self.verify,
                                       cert=self.cert,
                                       data=params_str,
                                       headers=self.headers,
                                       timeout=None)
            except Exception:
                self.logger(logging.WARNING, 'Could not ban %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
            if result and result.status_code == 200:
                return 0
            raise Exception('Could not ban the storage %s, status code returned: %s' % (storage_element, result.status_code if result else None))
        else:
            try:
                result = requests.delete('%s/ban/se?storage=%s' % (self.external_host, storage_element),
                                         verify=self.verify,
                                         cert=self.cert,
                                         data=params_str,
                                         headers=self.headers,
                                         timeout=None)
            except Exception:
                self.logger(logging.WARNING, 'Could not unban %s on %s - %s', storage_element, self.external_host, str(traceback.format_exc()))
            if result and result.status_code == 204:
                return 0
            raise Exception('Could not unban the storage %s, status code returned: %s' % (storage_element, result.status_code if result else None))
    # Private methods unique to the FTS3 Transfertool

    @staticmethod
    def __extract_host(external_host):
        # graphite does not like the dots in the FQDN
        return urlparse(external_host).hostname.replace('.', '_')
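    # With a hypothetical endpoint, __extract_host('https://fts3.example.org:8446')
    # returns 'fts3_example_org', which is safe to use as a graphite metric label.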
    def __get_transfer_baseid_voname(self):
        """
        Get the transfer base id and VO name from the external host.

        :returns: Base id as a string and VO name as a string.
        """
        result = (None, None)
        try:
            key = 'voname:%s' % self.external_host
            result = REGION_SHORT.get(key)
            if isinstance(result, NoValue):
                self.logger(logging.DEBUG, "Refresh transfer baseid and voname for %s", self.external_host)

                get_result = None
                try:
                    get_result = requests.get('%s/whoami' % self.external_host,
                                              verify=self.verify,
                                              cert=self.cert,
                                              headers=self.headers,
                                              timeout=5)
                except ReadTimeout as error:
                    raise TransferToolTimeout(error)
                except json.JSONDecodeError as error:
                    raise TransferToolWrongAnswer(error)
                except Exception as error:
                    self.logger(logging.WARNING, 'Could not get baseid and voname from %s - %s' % (self.external_host, str(error)))

                if get_result and get_result.status_code == 200:
                    baseid = str(get_result.json()['base_id'])
                    voname = str(get_result.json()['vos'][0])
                    result = (baseid, voname)

                    REGION_SHORT.set(key, result)

                    self.logger(logging.DEBUG, "Get baseid %s and voname %s from %s", baseid, voname, self.external_host)
                else:
                    self.logger(logging.WARNING, "Failed to get baseid and voname from %s, error: %s", self.external_host, get_result.text if get_result is not None else get_result)
                    result = (None, None)
        except Exception as error:
            self.logger(logging.WARNING, "Failed to get baseid and voname from %s: %s" % (self.external_host, str(error)))
            result = (None, None)
        return result
    def __get_deterministic_id(self, sid):
        """
        Get deterministic FTS job id.

        :param sid: FTS seed id.
        :returns:   FTS transfer identifier.
        """
        baseid, voname = self.__get_transfer_baseid_voname()
        if baseid is None or voname is None:
            return None
        root = uuid.UUID(baseid)
        atlas = uuid.uuid5(root, voname)
        jobid = uuid.uuid5(atlas, sid)
        return str(jobid)
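    # A minimal sketch of the derivation chain above, with hypothetical inputs:
    #
    #   import uuid
    #   root = uuid.UUID('01874efb-4715-45dc-49a8-b007d7d2b661')          # server base_id
    #   vo_ns = uuid.uuid5(root, 'example_vo')                            # VO-scoped namespace
    #   job_id = uuid.uuid5(vo_ns, 'b9823b9a3e8a4a8bb54c8afe3dddbec5')    # seeded by the request id
    #
    # The same (base_id, vo, sid) triple always yields the same job id, which is what
    # lets a failed submission be retried under a predictable transfer id.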
    def __bulk_query_responses(self, jobs_response, requests_by_eid):
        if not isinstance(jobs_response, list):
            jobs_response = [jobs_response]

        responses = {}
        for job_response in jobs_response:
            transfer_id = job_response['job_id']
            if job_response['http_status'] == '200 Ok':
                files_response = job_response['files']
                multi_sources = job_response['job_metadata'].get('multi_sources', False)
                if multi_sources and job_response['job_state'] not in [FTS_STATE.FAILED,
                                                                       FTS_STATE.FINISHEDDIRTY,
                                                                       FTS_STATE.CANCELED,
                                                                       FTS_STATE.FINISHED]:
                    # a multi-source replica job is still running; we should wait
                    responses[transfer_id] = {}
                    continue

                resps = {}
                for file_resp in files_response:
                    file_state = file_resp['file_state']
                    # for multi-source replica jobs, the file_metadata (request_id) will be the same.
                    # The next used file will overwrite the current used one. Only the last used file will be returned.
                    if multi_sources and file_state == FTS_STATE.NOT_USED:
                        continue

                    request_id = file_resp['file_metadata']['request_id']
                    request = requests_by_eid.get(transfer_id, {}).get(request_id)
                    if request is not None:
                        resps[request_id] = FTS3ApiTransferStatusReport(self.external_host, request_id=request_id, request=request,
                                                                        job_response=job_response, file_response=file_resp)

                    # multi-source replica job for which we found the successful source: this is the final state
                    if multi_sources and file_state == FTS_STATE.FINISHED:
                        break
                responses[transfer_id] = resps
            elif job_response['http_status'] == '404 Not Found':
                # Lost transfer
                responses[transfer_id] = None
            else:
                responses[transfer_id] = Exception('Could not retrieve transfer information (http_status: %s, http_message: %s)' % (job_response['http_status'],
                                                                                                                                    job_response.get('http_message')))
        return responses
    def __query_details(self, transfer_id):
        """
        Query the detailed status of a transfer in FTS3 via JSON.

        :param transfer_id: FTS transfer identifier as a string.
        :returns:           Detailed transfer status information as a dictionary.
        """

        files = requests.get('%s/jobs/%s/files' % (self.external_host, transfer_id),
                             verify=self.verify,
                             cert=self.cert,
                             headers=self.headers,
                             timeout=5)
        if files and files.status_code in (200, 207):
            QUERY_DETAILS_COUNTER.labels(state='success', host=self.__extract_host(self.external_host)).inc()
            return files.json()

        QUERY_DETAILS_COUNTER.labels(state='failure', host=self.__extract_host(self.external_host)).inc()
        return