rucio 37.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rucio might be problematic; see the package's registry page for more details.

Files changed (487)
  1. rucio/__init__.py +17 -0
  2. rucio/alembicrevision.py +15 -0
  3. rucio/cli/__init__.py +14 -0
  4. rucio/cli/account.py +216 -0
  5. rucio/cli/bin_legacy/__init__.py +13 -0
  6. rucio/cli/bin_legacy/rucio.py +2825 -0
  7. rucio/cli/bin_legacy/rucio_admin.py +2500 -0
  8. rucio/cli/command.py +272 -0
  9. rucio/cli/config.py +72 -0
  10. rucio/cli/did.py +191 -0
  11. rucio/cli/download.py +128 -0
  12. rucio/cli/lifetime_exception.py +33 -0
  13. rucio/cli/replica.py +162 -0
  14. rucio/cli/rse.py +293 -0
  15. rucio/cli/rule.py +158 -0
  16. rucio/cli/scope.py +40 -0
  17. rucio/cli/subscription.py +73 -0
  18. rucio/cli/upload.py +60 -0
  19. rucio/cli/utils.py +226 -0
  20. rucio/client/__init__.py +15 -0
  21. rucio/client/accountclient.py +432 -0
  22. rucio/client/accountlimitclient.py +183 -0
  23. rucio/client/baseclient.py +983 -0
  24. rucio/client/client.py +120 -0
  25. rucio/client/configclient.py +126 -0
  26. rucio/client/credentialclient.py +59 -0
  27. rucio/client/didclient.py +868 -0
  28. rucio/client/diracclient.py +56 -0
  29. rucio/client/downloadclient.py +1783 -0
  30. rucio/client/exportclient.py +44 -0
  31. rucio/client/fileclient.py +50 -0
  32. rucio/client/importclient.py +42 -0
  33. rucio/client/lifetimeclient.py +90 -0
  34. rucio/client/lockclient.py +109 -0
  35. rucio/client/metaconventionsclient.py +140 -0
  36. rucio/client/pingclient.py +44 -0
  37. rucio/client/replicaclient.py +452 -0
  38. rucio/client/requestclient.py +125 -0
  39. rucio/client/richclient.py +317 -0
  40. rucio/client/rseclient.py +746 -0
  41. rucio/client/ruleclient.py +294 -0
  42. rucio/client/scopeclient.py +90 -0
  43. rucio/client/subscriptionclient.py +173 -0
  44. rucio/client/touchclient.py +82 -0
  45. rucio/client/uploadclient.py +969 -0
  46. rucio/common/__init__.py +13 -0
  47. rucio/common/bittorrent.py +234 -0
  48. rucio/common/cache.py +111 -0
  49. rucio/common/checksum.py +168 -0
  50. rucio/common/client.py +122 -0
  51. rucio/common/config.py +788 -0
  52. rucio/common/constants.py +217 -0
  53. rucio/common/constraints.py +17 -0
  54. rucio/common/didtype.py +237 -0
  55. rucio/common/dumper/__init__.py +342 -0
  56. rucio/common/dumper/consistency.py +497 -0
  57. rucio/common/dumper/data_models.py +362 -0
  58. rucio/common/dumper/path_parsing.py +75 -0
  59. rucio/common/exception.py +1208 -0
  60. rucio/common/extra.py +31 -0
  61. rucio/common/logging.py +420 -0
  62. rucio/common/pcache.py +1409 -0
  63. rucio/common/plugins.py +185 -0
  64. rucio/common/policy.py +93 -0
  65. rucio/common/schema/__init__.py +200 -0
  66. rucio/common/schema/generic.py +416 -0
  67. rucio/common/schema/generic_multi_vo.py +395 -0
  68. rucio/common/stomp_utils.py +423 -0
  69. rucio/common/stopwatch.py +55 -0
  70. rucio/common/test_rucio_server.py +154 -0
  71. rucio/common/types.py +483 -0
  72. rucio/common/utils.py +1688 -0
  73. rucio/core/__init__.py +13 -0
  74. rucio/core/account.py +496 -0
  75. rucio/core/account_counter.py +236 -0
  76. rucio/core/account_limit.py +425 -0
  77. rucio/core/authentication.py +620 -0
  78. rucio/core/config.py +437 -0
  79. rucio/core/credential.py +224 -0
  80. rucio/core/did.py +3004 -0
  81. rucio/core/did_meta_plugins/__init__.py +252 -0
  82. rucio/core/did_meta_plugins/did_column_meta.py +331 -0
  83. rucio/core/did_meta_plugins/did_meta_plugin_interface.py +165 -0
  84. rucio/core/did_meta_plugins/elasticsearch_meta.py +407 -0
  85. rucio/core/did_meta_plugins/filter_engine.py +672 -0
  86. rucio/core/did_meta_plugins/json_meta.py +240 -0
  87. rucio/core/did_meta_plugins/mongo_meta.py +229 -0
  88. rucio/core/did_meta_plugins/postgres_meta.py +352 -0
  89. rucio/core/dirac.py +237 -0
  90. rucio/core/distance.py +187 -0
  91. rucio/core/exporter.py +59 -0
  92. rucio/core/heartbeat.py +363 -0
  93. rucio/core/identity.py +301 -0
  94. rucio/core/importer.py +260 -0
  95. rucio/core/lifetime_exception.py +377 -0
  96. rucio/core/lock.py +577 -0
  97. rucio/core/message.py +288 -0
  98. rucio/core/meta_conventions.py +203 -0
  99. rucio/core/monitor.py +448 -0
  100. rucio/core/naming_convention.py +195 -0
  101. rucio/core/nongrid_trace.py +136 -0
  102. rucio/core/oidc.py +1463 -0
  103. rucio/core/permission/__init__.py +161 -0
  104. rucio/core/permission/generic.py +1124 -0
  105. rucio/core/permission/generic_multi_vo.py +1144 -0
  106. rucio/core/quarantined_replica.py +224 -0
  107. rucio/core/replica.py +4483 -0
  108. rucio/core/replica_sorter.py +362 -0
  109. rucio/core/request.py +3091 -0
  110. rucio/core/rse.py +2079 -0
  111. rucio/core/rse_counter.py +185 -0
  112. rucio/core/rse_expression_parser.py +459 -0
  113. rucio/core/rse_selector.py +304 -0
  114. rucio/core/rule.py +4484 -0
  115. rucio/core/rule_grouping.py +1620 -0
  116. rucio/core/scope.py +181 -0
  117. rucio/core/subscription.py +362 -0
  118. rucio/core/topology.py +490 -0
  119. rucio/core/trace.py +375 -0
  120. rucio/core/transfer.py +1531 -0
  121. rucio/core/vo.py +169 -0
  122. rucio/core/volatile_replica.py +151 -0
  123. rucio/daemons/__init__.py +13 -0
  124. rucio/daemons/abacus/__init__.py +13 -0
  125. rucio/daemons/abacus/account.py +116 -0
  126. rucio/daemons/abacus/collection_replica.py +124 -0
  127. rucio/daemons/abacus/rse.py +117 -0
  128. rucio/daemons/atropos/__init__.py +13 -0
  129. rucio/daemons/atropos/atropos.py +242 -0
  130. rucio/daemons/auditor/__init__.py +289 -0
  131. rucio/daemons/auditor/hdfs.py +97 -0
  132. rucio/daemons/auditor/srmdumps.py +355 -0
  133. rucio/daemons/automatix/__init__.py +13 -0
  134. rucio/daemons/automatix/automatix.py +304 -0
  135. rucio/daemons/badreplicas/__init__.py +13 -0
  136. rucio/daemons/badreplicas/minos.py +322 -0
  137. rucio/daemons/badreplicas/minos_temporary_expiration.py +171 -0
  138. rucio/daemons/badreplicas/necromancer.py +196 -0
  139. rucio/daemons/bb8/__init__.py +13 -0
  140. rucio/daemons/bb8/bb8.py +353 -0
  141. rucio/daemons/bb8/common.py +759 -0
  142. rucio/daemons/bb8/nuclei_background_rebalance.py +153 -0
  143. rucio/daemons/bb8/t2_background_rebalance.py +153 -0
  144. rucio/daemons/cache/__init__.py +13 -0
  145. rucio/daemons/cache/consumer.py +133 -0
  146. rucio/daemons/common.py +405 -0
  147. rucio/daemons/conveyor/__init__.py +13 -0
  148. rucio/daemons/conveyor/common.py +562 -0
  149. rucio/daemons/conveyor/finisher.py +529 -0
  150. rucio/daemons/conveyor/poller.py +394 -0
  151. rucio/daemons/conveyor/preparer.py +205 -0
  152. rucio/daemons/conveyor/receiver.py +179 -0
  153. rucio/daemons/conveyor/stager.py +133 -0
  154. rucio/daemons/conveyor/submitter.py +403 -0
  155. rucio/daemons/conveyor/throttler.py +532 -0
  156. rucio/daemons/follower/__init__.py +13 -0
  157. rucio/daemons/follower/follower.py +101 -0
  158. rucio/daemons/hermes/__init__.py +13 -0
  159. rucio/daemons/hermes/hermes.py +534 -0
  160. rucio/daemons/judge/__init__.py +13 -0
  161. rucio/daemons/judge/cleaner.py +159 -0
  162. rucio/daemons/judge/evaluator.py +185 -0
  163. rucio/daemons/judge/injector.py +162 -0
  164. rucio/daemons/judge/repairer.py +154 -0
  165. rucio/daemons/oauthmanager/__init__.py +13 -0
  166. rucio/daemons/oauthmanager/oauthmanager.py +198 -0
  167. rucio/daemons/reaper/__init__.py +13 -0
  168. rucio/daemons/reaper/dark_reaper.py +282 -0
  169. rucio/daemons/reaper/reaper.py +739 -0
  170. rucio/daemons/replicarecoverer/__init__.py +13 -0
  171. rucio/daemons/replicarecoverer/suspicious_replica_recoverer.py +626 -0
  172. rucio/daemons/rsedecommissioner/__init__.py +13 -0
  173. rucio/daemons/rsedecommissioner/config.py +81 -0
  174. rucio/daemons/rsedecommissioner/profiles/__init__.py +24 -0
  175. rucio/daemons/rsedecommissioner/profiles/atlas.py +60 -0
  176. rucio/daemons/rsedecommissioner/profiles/generic.py +452 -0
  177. rucio/daemons/rsedecommissioner/profiles/types.py +93 -0
  178. rucio/daemons/rsedecommissioner/rse_decommissioner.py +280 -0
  179. rucio/daemons/storage/__init__.py +13 -0
  180. rucio/daemons/storage/consistency/__init__.py +13 -0
  181. rucio/daemons/storage/consistency/actions.py +848 -0
  182. rucio/daemons/tracer/__init__.py +13 -0
  183. rucio/daemons/tracer/kronos.py +511 -0
  184. rucio/daemons/transmogrifier/__init__.py +13 -0
  185. rucio/daemons/transmogrifier/transmogrifier.py +762 -0
  186. rucio/daemons/undertaker/__init__.py +13 -0
  187. rucio/daemons/undertaker/undertaker.py +137 -0
  188. rucio/db/__init__.py +13 -0
  189. rucio/db/sqla/__init__.py +52 -0
  190. rucio/db/sqla/constants.py +206 -0
  191. rucio/db/sqla/migrate_repo/__init__.py +13 -0
  192. rucio/db/sqla/migrate_repo/env.py +110 -0
  193. rucio/db/sqla/migrate_repo/versions/01eaf73ab656_add_new_rule_notification_state_progress.py +70 -0
  194. rucio/db/sqla/migrate_repo/versions/0437a40dbfd1_add_eol_at_in_rules.py +47 -0
  195. rucio/db/sqla/migrate_repo/versions/0f1adb7a599a_create_transfer_hops_table.py +59 -0
  196. rucio/db/sqla/migrate_repo/versions/102efcf145f4_added_stuck_at_column_to_rules.py +43 -0
  197. rucio/db/sqla/migrate_repo/versions/13d4f70c66a9_introduce_transfer_limits.py +91 -0
  198. rucio/db/sqla/migrate_repo/versions/140fef722e91_cleanup_distances_table.py +76 -0
  199. rucio/db/sqla/migrate_repo/versions/14ec5aeb64cf_add_request_external_host.py +43 -0
  200. rucio/db/sqla/migrate_repo/versions/156fb5b5a14_add_request_type_to_requests_idx.py +50 -0
  201. rucio/db/sqla/migrate_repo/versions/1677d4d803c8_split_rse_availability_into_multiple.py +68 -0
  202. rucio/db/sqla/migrate_repo/versions/16a0aca82e12_create_index_on_table_replicas_path.py +40 -0
  203. rucio/db/sqla/migrate_repo/versions/1803333ac20f_adding_provenance_and_phys_group.py +45 -0
  204. rucio/db/sqla/migrate_repo/versions/1a29d6a9504c_add_didtype_chck_to_requests.py +60 -0
  205. rucio/db/sqla/migrate_repo/versions/1a80adff031a_create_index_on_rules_hist_recent.py +40 -0
  206. rucio/db/sqla/migrate_repo/versions/1c45d9730ca6_increase_identity_length.py +140 -0
  207. rucio/db/sqla/migrate_repo/versions/1d1215494e95_add_quarantined_replicas_table.py +73 -0
  208. rucio/db/sqla/migrate_repo/versions/1d96f484df21_asynchronous_rules_and_rule_approval.py +74 -0
  209. rucio/db/sqla/migrate_repo/versions/1f46c5f240ac_add_bytes_column_to_bad_replicas.py +43 -0
  210. rucio/db/sqla/migrate_repo/versions/1fc15ab60d43_add_message_history_table.py +50 -0
  211. rucio/db/sqla/migrate_repo/versions/2190e703eb6e_move_rse_settings_to_rse_attributes.py +134 -0
  212. rucio/db/sqla/migrate_repo/versions/21d6b9dc9961_add_mismatch_scheme_state_to_requests.py +64 -0
  213. rucio/db/sqla/migrate_repo/versions/22cf51430c78_add_availability_column_to_table_rses.py +39 -0
  214. rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py +64 -0
  215. rucio/db/sqla/migrate_repo/versions/25821a8a45a3_remove_unique_constraint_on_requests.py +51 -0
  216. rucio/db/sqla/migrate_repo/versions/25fc855625cf_added_unique_constraint_to_rules.py +41 -0
  217. rucio/db/sqla/migrate_repo/versions/269fee20dee9_add_repair_cnt_to_locks.py +43 -0
  218. rucio/db/sqla/migrate_repo/versions/271a46ea6244_add_ignore_availability_column_to_rules.py +44 -0
  219. rucio/db/sqla/migrate_repo/versions/277b5fbb41d3_switch_heartbeats_executable.py +53 -0
  220. rucio/db/sqla/migrate_repo/versions/27e3a68927fb_remove_replicas_tombstone_and_replicas_.py +38 -0
  221. rucio/db/sqla/migrate_repo/versions/2854cd9e168_added_rule_id_column.py +47 -0
  222. rucio/db/sqla/migrate_repo/versions/295289b5a800_processed_by_and__at_in_requests.py +45 -0
  223. rucio/db/sqla/migrate_repo/versions/2962ece31cf4_add_nbaccesses_column_in_the_did_table.py +45 -0
  224. rucio/db/sqla/migrate_repo/versions/2af3291ec4c_added_replicas_history_table.py +57 -0
  225. rucio/db/sqla/migrate_repo/versions/2b69addda658_add_columns_for_third_party_copy_read_.py +45 -0
  226. rucio/db/sqla/migrate_repo/versions/2b8e7bcb4783_add_config_table.py +69 -0
  227. rucio/db/sqla/migrate_repo/versions/2ba5229cb54c_add_submitted_at_to_requests_table.py +43 -0
  228. rucio/db/sqla/migrate_repo/versions/2cbee484dcf9_added_column_volume_to_rse_transfer_.py +42 -0
  229. rucio/db/sqla/migrate_repo/versions/2edee4a83846_add_source_to_requests_and_requests_.py +47 -0
  230. rucio/db/sqla/migrate_repo/versions/2eef46be23d4_change_tokens_pk.py +46 -0
  231. rucio/db/sqla/migrate_repo/versions/2f648fc909f3_index_in_rule_history_on_scope_name.py +40 -0
  232. rucio/db/sqla/migrate_repo/versions/3082b8cef557_add_naming_convention_table_and_closed_.py +67 -0
  233. rucio/db/sqla/migrate_repo/versions/30d5206e9cad_increase_oauthrequest_redirect_msg_.py +37 -0
  234. rucio/db/sqla/migrate_repo/versions/30fa38b6434e_add_index_on_service_column_in_the_message_table.py +44 -0
  235. rucio/db/sqla/migrate_repo/versions/3152492b110b_added_staging_area_column.py +77 -0
  236. rucio/db/sqla/migrate_repo/versions/32c7d2783f7e_create_bad_replicas_table.py +60 -0
  237. rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py +72 -0
  238. rucio/db/sqla/migrate_repo/versions/35ef10d1e11b_change_index_on_table_requests.py +42 -0
  239. rucio/db/sqla/migrate_repo/versions/379a19b5332d_create_rse_limits_table.py +65 -0
  240. rucio/db/sqla/migrate_repo/versions/384b96aa0f60_created_rule_history_tables.py +133 -0
  241. rucio/db/sqla/migrate_repo/versions/3ac1660a1a72_extend_distance_table.py +55 -0
  242. rucio/db/sqla/migrate_repo/versions/3ad36e2268b0_create_collection_replicas_updates_table.py +76 -0
  243. rucio/db/sqla/migrate_repo/versions/3c9df354071b_extend_waiting_request_state.py +60 -0
  244. rucio/db/sqla/migrate_repo/versions/3d9813fab443_add_a_new_state_lost_in_badfilesstatus.py +44 -0
  245. rucio/db/sqla/migrate_repo/versions/40ad39ce3160_add_transferred_at_to_requests_table.py +43 -0
  246. rucio/db/sqla/migrate_repo/versions/4207be2fd914_add_notification_column_to_rules.py +64 -0
  247. rucio/db/sqla/migrate_repo/versions/42db2617c364_create_index_on_requests_external_id.py +40 -0
  248. rucio/db/sqla/migrate_repo/versions/436827b13f82_added_column_activity_to_table_requests.py +43 -0
  249. rucio/db/sqla/migrate_repo/versions/44278720f774_update_requests_typ_sta_upd_idx_index.py +44 -0
  250. rucio/db/sqla/migrate_repo/versions/45378a1e76a8_create_collection_replica_table.py +78 -0
  251. rucio/db/sqla/migrate_repo/versions/469d262be19_removing_created_at_index.py +41 -0
  252. rucio/db/sqla/migrate_repo/versions/4783c1f49cb4_create_distance_table.py +59 -0
  253. rucio/db/sqla/migrate_repo/versions/49a21b4d4357_create_index_on_table_tokens.py +44 -0
  254. rucio/db/sqla/migrate_repo/versions/4a2cbedda8b9_add_source_replica_expression_column_to_.py +43 -0
  255. rucio/db/sqla/migrate_repo/versions/4a7182d9578b_added_bytes_length_accessed_at_columns.py +49 -0
  256. rucio/db/sqla/migrate_repo/versions/4bab9edd01fc_create_index_on_requests_rule_id.py +40 -0
  257. rucio/db/sqla/migrate_repo/versions/4c3a4acfe006_new_attr_account_table.py +63 -0
  258. rucio/db/sqla/migrate_repo/versions/4cf0a2e127d4_adding_transient_metadata.py +43 -0
  259. rucio/db/sqla/migrate_repo/versions/4df2c5ddabc0_remove_temporary_dids.py +55 -0
  260. rucio/db/sqla/migrate_repo/versions/50280c53117c_add_qos_class_to_rse.py +45 -0
  261. rucio/db/sqla/migrate_repo/versions/52153819589c_add_rse_id_to_replicas_table.py +43 -0
  262. rucio/db/sqla/migrate_repo/versions/52fd9f4916fa_added_activity_to_rules.py +43 -0
  263. rucio/db/sqla/migrate_repo/versions/53b479c3cb0f_fix_did_meta_table_missing_updated_at_.py +45 -0
  264. rucio/db/sqla/migrate_repo/versions/5673b4b6e843_add_wfms_metadata_to_rule_tables.py +47 -0
  265. rucio/db/sqla/migrate_repo/versions/575767d9f89_added_source_history_table.py +58 -0
  266. rucio/db/sqla/migrate_repo/versions/58bff7008037_add_started_at_to_requests.py +45 -0
  267. rucio/db/sqla/migrate_repo/versions/58c8b78301ab_rename_callback_to_message.py +106 -0
  268. rucio/db/sqla/migrate_repo/versions/5f139f77382a_added_child_rule_id_column.py +55 -0
  269. rucio/db/sqla/migrate_repo/versions/688ef1840840_adding_did_meta_table.py +50 -0
  270. rucio/db/sqla/migrate_repo/versions/6e572a9bfbf3_add_new_split_container_column_to_rules.py +47 -0
  271. rucio/db/sqla/migrate_repo/versions/70587619328_add_comment_column_for_subscriptions.py +43 -0
  272. rucio/db/sqla/migrate_repo/versions/739064d31565_remove_history_table_pks.py +41 -0
  273. rucio/db/sqla/migrate_repo/versions/7541902bf173_add_didsfollowed_and_followevents_table.py +91 -0
  274. rucio/db/sqla/migrate_repo/versions/7ec22226cdbf_new_replica_state_for_temporary_.py +72 -0
  275. rucio/db/sqla/migrate_repo/versions/810a41685bc1_added_columns_rse_transfer_limits.py +49 -0
  276. rucio/db/sqla/migrate_repo/versions/83f991c63a93_correct_rse_expression_length.py +43 -0
  277. rucio/db/sqla/migrate_repo/versions/8523998e2e76_increase_size_of_extended_attributes_.py +43 -0
  278. rucio/db/sqla/migrate_repo/versions/8ea9122275b1_adding_missing_function_based_indices.py +53 -0
  279. rucio/db/sqla/migrate_repo/versions/90f47792bb76_add_clob_payload_to_messages.py +45 -0
  280. rucio/db/sqla/migrate_repo/versions/914b8f02df38_new_table_for_lifetime_model_exceptions.py +68 -0
  281. rucio/db/sqla/migrate_repo/versions/94a5961ddbf2_add_estimator_columns.py +45 -0
  282. rucio/db/sqla/migrate_repo/versions/9a1b149a2044_add_saml_identity_type.py +94 -0
  283. rucio/db/sqla/migrate_repo/versions/9a45bc4ea66d_add_vp_table.py +54 -0
  284. rucio/db/sqla/migrate_repo/versions/9eb936a81eb1_true_is_true.py +72 -0
  285. rucio/db/sqla/migrate_repo/versions/a08fa8de1545_transfer_stats_table.py +55 -0
  286. rucio/db/sqla/migrate_repo/versions/a118956323f8_added_vo_table_and_vo_col_to_rse.py +76 -0
  287. rucio/db/sqla/migrate_repo/versions/a193a275255c_add_status_column_in_messages.py +47 -0
  288. rucio/db/sqla/migrate_repo/versions/a5f6f6e928a7_1_7_0.py +121 -0
  289. rucio/db/sqla/migrate_repo/versions/a616581ee47_added_columns_to_table_requests.py +59 -0
  290. rucio/db/sqla/migrate_repo/versions/a6eb23955c28_state_idx_non_functional.py +52 -0
  291. rucio/db/sqla/migrate_repo/versions/a74275a1ad30_added_global_quota_table.py +54 -0
  292. rucio/db/sqla/migrate_repo/versions/a93e4e47bda_heartbeats.py +64 -0
  293. rucio/db/sqla/migrate_repo/versions/ae2a56fcc89_added_comment_column_to_rules.py +49 -0
  294. rucio/db/sqla/migrate_repo/versions/b0070f3695c8_add_deletedidmeta_table.py +57 -0
  295. rucio/db/sqla/migrate_repo/versions/b4293a99f344_added_column_identity_to_table_tokens.py +43 -0
  296. rucio/db/sqla/migrate_repo/versions/b5493606bbf5_fix_primary_key_for_subscription_history.py +41 -0
  297. rucio/db/sqla/migrate_repo/versions/b7d287de34fd_removal_of_replicastate_source.py +91 -0
  298. rucio/db/sqla/migrate_repo/versions/b818052fa670_add_index_to_quarantined_replicas.py +40 -0
  299. rucio/db/sqla/migrate_repo/versions/b8caac94d7f0_add_comments_column_for_subscriptions_.py +43 -0
  300. rucio/db/sqla/migrate_repo/versions/b96a1c7e1cc4_new_bad_pfns_table_and_bad_replicas_.py +143 -0
  301. rucio/db/sqla/migrate_repo/versions/bb695f45c04_extend_request_state.py +76 -0
  302. rucio/db/sqla/migrate_repo/versions/bc68e9946deb_add_staging_timestamps_to_request.py +50 -0
  303. rucio/db/sqla/migrate_repo/versions/bf3baa1c1474_correct_pk_and_idx_for_history_tables.py +72 -0
  304. rucio/db/sqla/migrate_repo/versions/c0937668555f_add_qos_policy_map_table.py +55 -0
  305. rucio/db/sqla/migrate_repo/versions/c129ccdb2d5_add_lumiblocknr_to_dids.py +43 -0
  306. rucio/db/sqla/migrate_repo/versions/ccdbcd48206e_add_did_type_column_index_on_did_meta_.py +65 -0
  307. rucio/db/sqla/migrate_repo/versions/cebad904c4dd_new_payload_column_for_heartbeats.py +47 -0
  308. rucio/db/sqla/migrate_repo/versions/d1189a09c6e0_oauth2_0_and_jwt_feature_support_adding_.py +146 -0
  309. rucio/db/sqla/migrate_repo/versions/d23453595260_extend_request_state_for_preparer.py +104 -0
  310. rucio/db/sqla/migrate_repo/versions/d6dceb1de2d_added_purge_column_to_rules.py +44 -0
  311. rucio/db/sqla/migrate_repo/versions/d6e2c3b2cf26_remove_third_party_copy_column_from_rse.py +43 -0
  312. rucio/db/sqla/migrate_repo/versions/d91002c5841_new_account_limits_table.py +103 -0
  313. rucio/db/sqla/migrate_repo/versions/e138c364ebd0_extending_columns_for_filter_and_.py +49 -0
  314. rucio/db/sqla/migrate_repo/versions/e59300c8b179_support_for_archive.py +104 -0
  315. rucio/db/sqla/migrate_repo/versions/f1b14a8c2ac1_postgres_use_check_constraints.py +29 -0
  316. rucio/db/sqla/migrate_repo/versions/f41ffe206f37_oracle_global_temporary_tables.py +74 -0
  317. rucio/db/sqla/migrate_repo/versions/f85a2962b021_adding_transfertool_column_to_requests_.py +47 -0
  318. rucio/db/sqla/migrate_repo/versions/fa7a7d78b602_increase_refresh_token_size.py +43 -0
  319. rucio/db/sqla/migrate_repo/versions/fb28a95fe288_add_replicas_rse_id_tombstone_idx.py +37 -0
  320. rucio/db/sqla/migrate_repo/versions/fe1a65b176c9_set_third_party_copy_read_and_write_.py +43 -0
  321. rucio/db/sqla/migrate_repo/versions/fe8ea2fa9788_added_third_party_copy_column_to_rse_.py +43 -0
  322. rucio/db/sqla/models.py +1743 -0
  323. rucio/db/sqla/sautils.py +55 -0
  324. rucio/db/sqla/session.py +529 -0
  325. rucio/db/sqla/types.py +206 -0
  326. rucio/db/sqla/util.py +543 -0
  327. rucio/gateway/__init__.py +13 -0
  328. rucio/gateway/account.py +345 -0
  329. rucio/gateway/account_limit.py +363 -0
  330. rucio/gateway/authentication.py +381 -0
  331. rucio/gateway/config.py +227 -0
  332. rucio/gateway/credential.py +70 -0
  333. rucio/gateway/did.py +987 -0
  334. rucio/gateway/dirac.py +83 -0
  335. rucio/gateway/exporter.py +60 -0
  336. rucio/gateway/heartbeat.py +76 -0
  337. rucio/gateway/identity.py +189 -0
  338. rucio/gateway/importer.py +46 -0
  339. rucio/gateway/lifetime_exception.py +121 -0
  340. rucio/gateway/lock.py +153 -0
  341. rucio/gateway/meta_conventions.py +98 -0
  342. rucio/gateway/permission.py +74 -0
  343. rucio/gateway/quarantined_replica.py +79 -0
  344. rucio/gateway/replica.py +538 -0
  345. rucio/gateway/request.py +330 -0
  346. rucio/gateway/rse.py +632 -0
  347. rucio/gateway/rule.py +437 -0
  348. rucio/gateway/scope.py +100 -0
  349. rucio/gateway/subscription.py +280 -0
  350. rucio/gateway/vo.py +126 -0
  351. rucio/rse/__init__.py +96 -0
  352. rucio/rse/protocols/__init__.py +13 -0
  353. rucio/rse/protocols/bittorrent.py +194 -0
  354. rucio/rse/protocols/cache.py +111 -0
  355. rucio/rse/protocols/dummy.py +100 -0
  356. rucio/rse/protocols/gfal.py +708 -0
  357. rucio/rse/protocols/globus.py +243 -0
  358. rucio/rse/protocols/http_cache.py +82 -0
  359. rucio/rse/protocols/mock.py +123 -0
  360. rucio/rse/protocols/ngarc.py +209 -0
  361. rucio/rse/protocols/posix.py +250 -0
  362. rucio/rse/protocols/protocol.py +361 -0
  363. rucio/rse/protocols/rclone.py +365 -0
  364. rucio/rse/protocols/rfio.py +145 -0
  365. rucio/rse/protocols/srm.py +338 -0
  366. rucio/rse/protocols/ssh.py +414 -0
  367. rucio/rse/protocols/storm.py +195 -0
  368. rucio/rse/protocols/webdav.py +594 -0
  369. rucio/rse/protocols/xrootd.py +302 -0
  370. rucio/rse/rsemanager.py +881 -0
  371. rucio/rse/translation.py +260 -0
  372. rucio/tests/__init__.py +13 -0
  373. rucio/tests/common.py +280 -0
  374. rucio/tests/common_server.py +149 -0
  375. rucio/transfertool/__init__.py +13 -0
  376. rucio/transfertool/bittorrent.py +200 -0
  377. rucio/transfertool/bittorrent_driver.py +50 -0
  378. rucio/transfertool/bittorrent_driver_qbittorrent.py +134 -0
  379. rucio/transfertool/fts3.py +1600 -0
  380. rucio/transfertool/fts3_plugins.py +152 -0
  381. rucio/transfertool/globus.py +201 -0
  382. rucio/transfertool/globus_library.py +181 -0
  383. rucio/transfertool/mock.py +89 -0
  384. rucio/transfertool/transfertool.py +221 -0
  385. rucio/vcsversion.py +11 -0
  386. rucio/version.py +45 -0
  387. rucio/web/__init__.py +13 -0
  388. rucio/web/rest/__init__.py +13 -0
  389. rucio/web/rest/flaskapi/__init__.py +13 -0
  390. rucio/web/rest/flaskapi/authenticated_bp.py +27 -0
  391. rucio/web/rest/flaskapi/v1/__init__.py +13 -0
  392. rucio/web/rest/flaskapi/v1/accountlimits.py +236 -0
  393. rucio/web/rest/flaskapi/v1/accounts.py +1103 -0
  394. rucio/web/rest/flaskapi/v1/archives.py +102 -0
  395. rucio/web/rest/flaskapi/v1/auth.py +1644 -0
  396. rucio/web/rest/flaskapi/v1/common.py +426 -0
  397. rucio/web/rest/flaskapi/v1/config.py +304 -0
  398. rucio/web/rest/flaskapi/v1/credentials.py +213 -0
  399. rucio/web/rest/flaskapi/v1/dids.py +2340 -0
  400. rucio/web/rest/flaskapi/v1/dirac.py +116 -0
  401. rucio/web/rest/flaskapi/v1/export.py +75 -0
  402. rucio/web/rest/flaskapi/v1/heartbeats.py +127 -0
  403. rucio/web/rest/flaskapi/v1/identities.py +285 -0
  404. rucio/web/rest/flaskapi/v1/import.py +132 -0
  405. rucio/web/rest/flaskapi/v1/lifetime_exceptions.py +312 -0
  406. rucio/web/rest/flaskapi/v1/locks.py +358 -0
  407. rucio/web/rest/flaskapi/v1/main.py +91 -0
  408. rucio/web/rest/flaskapi/v1/meta_conventions.py +241 -0
  409. rucio/web/rest/flaskapi/v1/metrics.py +36 -0
  410. rucio/web/rest/flaskapi/v1/nongrid_traces.py +97 -0
  411. rucio/web/rest/flaskapi/v1/ping.py +88 -0
  412. rucio/web/rest/flaskapi/v1/redirect.py +366 -0
  413. rucio/web/rest/flaskapi/v1/replicas.py +1894 -0
  414. rucio/web/rest/flaskapi/v1/requests.py +998 -0
  415. rucio/web/rest/flaskapi/v1/rses.py +2250 -0
  416. rucio/web/rest/flaskapi/v1/rules.py +854 -0
  417. rucio/web/rest/flaskapi/v1/scopes.py +159 -0
  418. rucio/web/rest/flaskapi/v1/subscriptions.py +650 -0
  419. rucio/web/rest/flaskapi/v1/templates/auth_crash.html +80 -0
  420. rucio/web/rest/flaskapi/v1/templates/auth_granted.html +82 -0
  421. rucio/web/rest/flaskapi/v1/traces.py +137 -0
  422. rucio/web/rest/flaskapi/v1/types.py +20 -0
  423. rucio/web/rest/flaskapi/v1/vos.py +278 -0
  424. rucio/web/rest/main.py +18 -0
  425. rucio/web/rest/metrics.py +27 -0
  426. rucio/web/rest/ping.py +27 -0
  427. rucio-37.0.0rc1.data/data/rucio/etc/alembic.ini.template +71 -0
  428. rucio-37.0.0rc1.data/data/rucio/etc/alembic_offline.ini.template +74 -0
  429. rucio-37.0.0rc1.data/data/rucio/etc/globus-config.yml.template +5 -0
  430. rucio-37.0.0rc1.data/data/rucio/etc/ldap.cfg.template +30 -0
  431. rucio-37.0.0rc1.data/data/rucio/etc/mail_templates/rule_approval_request.tmpl +38 -0
  432. rucio-37.0.0rc1.data/data/rucio/etc/mail_templates/rule_approved_admin.tmpl +4 -0
  433. rucio-37.0.0rc1.data/data/rucio/etc/mail_templates/rule_approved_user.tmpl +17 -0
  434. rucio-37.0.0rc1.data/data/rucio/etc/mail_templates/rule_denied_admin.tmpl +6 -0
  435. rucio-37.0.0rc1.data/data/rucio/etc/mail_templates/rule_denied_user.tmpl +17 -0
  436. rucio-37.0.0rc1.data/data/rucio/etc/mail_templates/rule_ok_notification.tmpl +19 -0
  437. rucio-37.0.0rc1.data/data/rucio/etc/rse-accounts.cfg.template +25 -0
  438. rucio-37.0.0rc1.data/data/rucio/etc/rucio.cfg.atlas.client.template +43 -0
  439. rucio-37.0.0rc1.data/data/rucio/etc/rucio.cfg.template +241 -0
  440. rucio-37.0.0rc1.data/data/rucio/etc/rucio_multi_vo.cfg.template +217 -0
  441. rucio-37.0.0rc1.data/data/rucio/requirements.server.txt +297 -0
  442. rucio-37.0.0rc1.data/data/rucio/tools/bootstrap.py +34 -0
  443. rucio-37.0.0rc1.data/data/rucio/tools/merge_rucio_configs.py +144 -0
  444. rucio-37.0.0rc1.data/data/rucio/tools/reset_database.py +40 -0
  445. rucio-37.0.0rc1.data/scripts/rucio +133 -0
  446. rucio-37.0.0rc1.data/scripts/rucio-abacus-account +74 -0
  447. rucio-37.0.0rc1.data/scripts/rucio-abacus-collection-replica +46 -0
  448. rucio-37.0.0rc1.data/scripts/rucio-abacus-rse +78 -0
  449. rucio-37.0.0rc1.data/scripts/rucio-admin +97 -0
  450. rucio-37.0.0rc1.data/scripts/rucio-atropos +60 -0
  451. rucio-37.0.0rc1.data/scripts/rucio-auditor +206 -0
  452. rucio-37.0.0rc1.data/scripts/rucio-automatix +50 -0
  453. rucio-37.0.0rc1.data/scripts/rucio-bb8 +57 -0
  454. rucio-37.0.0rc1.data/scripts/rucio-cache-client +141 -0
  455. rucio-37.0.0rc1.data/scripts/rucio-cache-consumer +42 -0
  456. rucio-37.0.0rc1.data/scripts/rucio-conveyor-finisher +58 -0
  457. rucio-37.0.0rc1.data/scripts/rucio-conveyor-poller +66 -0
  458. rucio-37.0.0rc1.data/scripts/rucio-conveyor-preparer +37 -0
  459. rucio-37.0.0rc1.data/scripts/rucio-conveyor-receiver +44 -0
  460. rucio-37.0.0rc1.data/scripts/rucio-conveyor-stager +76 -0
  461. rucio-37.0.0rc1.data/scripts/rucio-conveyor-submitter +139 -0
  462. rucio-37.0.0rc1.data/scripts/rucio-conveyor-throttler +104 -0
  463. rucio-37.0.0rc1.data/scripts/rucio-dark-reaper +53 -0
  464. rucio-37.0.0rc1.data/scripts/rucio-dumper +160 -0
  465. rucio-37.0.0rc1.data/scripts/rucio-follower +44 -0
  466. rucio-37.0.0rc1.data/scripts/rucio-hermes +54 -0
  467. rucio-37.0.0rc1.data/scripts/rucio-judge-cleaner +89 -0
  468. rucio-37.0.0rc1.data/scripts/rucio-judge-evaluator +137 -0
  469. rucio-37.0.0rc1.data/scripts/rucio-judge-injector +44 -0
  470. rucio-37.0.0rc1.data/scripts/rucio-judge-repairer +44 -0
  471. rucio-37.0.0rc1.data/scripts/rucio-kronos +44 -0
  472. rucio-37.0.0rc1.data/scripts/rucio-minos +53 -0
  473. rucio-37.0.0rc1.data/scripts/rucio-minos-temporary-expiration +50 -0
  474. rucio-37.0.0rc1.data/scripts/rucio-necromancer +120 -0
  475. rucio-37.0.0rc1.data/scripts/rucio-oauth-manager +63 -0
  476. rucio-37.0.0rc1.data/scripts/rucio-reaper +83 -0
  477. rucio-37.0.0rc1.data/scripts/rucio-replica-recoverer +248 -0
  478. rucio-37.0.0rc1.data/scripts/rucio-rse-decommissioner +66 -0
  479. rucio-37.0.0rc1.data/scripts/rucio-storage-consistency-actions +74 -0
  480. rucio-37.0.0rc1.data/scripts/rucio-transmogrifier +77 -0
  481. rucio-37.0.0rc1.data/scripts/rucio-undertaker +76 -0
  482. rucio-37.0.0rc1.dist-info/METADATA +92 -0
  483. rucio-37.0.0rc1.dist-info/RECORD +487 -0
  484. rucio-37.0.0rc1.dist-info/WHEEL +5 -0
  485. rucio-37.0.0rc1.dist-info/licenses/AUTHORS.rst +100 -0
  486. rucio-37.0.0rc1.dist-info/licenses/LICENSE +201 -0
  487. rucio-37.0.0rc1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1620 @@
1
+ # Copyright European Organization for Nuclear Research (CERN) since 2012
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ from datetime import datetime
17
+ from typing import TYPE_CHECKING, Any, Optional
18
+
19
+ from sqlalchemy import and_, func, select
20
+ from sqlalchemy.exc import NoResultFound
21
+
22
+ import rucio.core.did
23
+ import rucio.core.lock
24
+ import rucio.core.replica
25
+ from rucio.common.config import config_get_int
26
+ from rucio.common.constants import RseAttr
27
+ from rucio.common.exception import InsufficientTargetRSEs
28
+ from rucio.core import account_counter, rse_counter
29
+ from rucio.core import request as request_core
30
+ from rucio.core.rse import get_rse, get_rse_attribute, get_rse_name
31
+ from rucio.db.sqla import models
32
+ from rucio.db.sqla.constants import OBSOLETE, DIDType, LockState, ReplicaState, RequestType, RuleGrouping
33
+ from rucio.db.sqla.session import transactional_session
34
+
35
+ if TYPE_CHECKING:
36
+ from collections.abc import Sequence
37
+
38
+ from sqlalchemy.orm import Session
39
+
40
+ from rucio.common.types import InternalScope
41
+ from rucio.core.rse_selector import RSESelector
42
+
43
+
44
@transactional_session
def apply_rule_grouping(
        datasetfiles: "Sequence[dict[str, Any]]",
        locks: dict[tuple["InternalScope", str], "Sequence[models.ReplicaLock]"],
        replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        source_replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        rseselector: "RSESelector",
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional["Sequence[str]"] = None,
        source_rses: Optional["Sequence[str]"] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply the rule's grouping policy to the given files.

    Dispatches to the NONE, ALL or DATASET grouping implementation depending
    on rule.grouping; any value other than NONE and ALL is treated as DATASET.

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source_replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 Dict of replicas to create, Dict of locks to create, List of transfers to create.
    :raises:                  InsufficientQuota, InsufficientTargetRSEs, RSEOverQuota
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """
    # Pick the grouping-specific implementation; DATASET is the fallback.
    if rule.grouping == RuleGrouping.NONE:
        handler = __apply_rule_to_files_none_grouping
    elif rule.grouping == RuleGrouping.ALL:
        handler = __apply_rule_to_files_all_grouping
    else:
        handler = __apply_rule_to_files_dataset_grouping

    # Normalise the optional sequences exactly as the original dispatch did.
    return handler(datasetfiles=datasetfiles,
                   locks=locks,
                   replicas=replicas,
                   source_replicas=source_replicas,
                   rseselector=rseselector,
                   rule=rule,
                   preferred_rse_ids=preferred_rse_ids or [],
                   source_rses=source_rses or [],
                   session=session)
117
+
118
+
119
@transactional_session
def repair_stuck_locks_and_apply_rule_grouping(
        datasetfiles: "Sequence[dict[str, Any]]",
        locks: dict[tuple["InternalScope", str], models.ReplicaLock],
        replicas: dict[tuple["InternalScope", str], Any],
        source_replicas: dict[tuple["InternalScope", str], Any],
        rseselector: "RSESelector", rule: models.ReplicationRule,
        source_rses: "Sequence[str]",
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]],
           dict[str, list[dict[str, models.ReplicaLock]]]]:
    """
    Repair stuck locks and apply the rule's grouping policy to the files.

    Dispatches to the NONE, ALL or DATASET repair implementation depending on
    rule.grouping; any value other than NONE and ALL is treated as DATASET.

    :param datasetfiles:    Dict holding all datasets and files.
    :param locks:           Dict holding all locks.
    :param replicas:        Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector:     The RSESelector to be used.
    :param rule:            The rule object.
    :param source_rses:     RSE ids of eligible source_rses.
    :param session:         Session of the db.
    :returns:               List of replicas to create, List of locks to create,
                            List of transfers to create, List of locks to delete.
    :raises:                InsufficientQuota, InsufficientTargetRSEs
    :attention:             This method modifies the contents of the locks and replicas input parameters.
    """
    # Select the grouping-specific repair routine; DATASET is the fallback.
    if rule.grouping == RuleGrouping.NONE:
        repair_func = __repair_stuck_locks_with_none_grouping
    elif rule.grouping == RuleGrouping.ALL:
        repair_func = __repair_stuck_locks_with_all_grouping
    else:
        repair_func = __repair_stuck_locks_with_dataset_grouping

    return repair_func(datasetfiles=datasetfiles,
                       locks=locks,
                       replicas=replicas,
                       source_replicas=source_replicas,
                       rseselector=rseselector,
                       rule=rule,
                       source_rses=source_rses,
                       session=session)
185
+
186
+
187
@transactional_session
def create_transfer_dict(dest_rse_id, request_type, scope, name, rule, lock=None, bytes_=None, md5=None, adler32=None, ds_scope=None, ds_name=None, copy_pin_lifetime=None, activity=None, retry_count=None, *, session: "Session"):
    """
    Build and return the request dictionary describing a single transfer.

    :param dest_rse_id:       The destination RSE id.
    :param request_type:      The request type.
    :param scope:             The scope of the file.
    :param name:              The name of the file.
    :param rule:              The rule responsible for the transfer.
    :param lock:              The lock responsible for the transfer.
    :param bytes_:            The filesize of the file in bytes.
    :param md5:               The md5 checksum of the file.
    :param adler32:           The adler32 checksum of the file.
    :param ds_scope:          Dataset scope the file belongs to.
    :param ds_name:           Dataset name the file belongs to.
    :param copy_pin_lifetime: Lifetime in the case of STAGING requests.
    :param activity:          Activity to be used.
    :param retry_count:       Retry count to record on the request.
    :param session:           Session of the db.
    :returns:                 Request dictionary.
    """
    # Activity falls back from the explicit argument to the rule, then 'default'.
    attributes = {
        'activity': activity or rule.activity or 'default',
        'source_replica_expression': rule.source_replica_expression,
        'lifetime': copy_pin_lifetime,
        'ds_scope': ds_scope,
        'ds_name': ds_name,
        'bytes': bytes_,
        'md5': md5,
        'adler32': adler32,
        'priority': rule.priority,
        # 'allow_tape_source': has_account_attribute(account=rule.account, key='admin', session=session)}
        'allow_tape_source': True,
    }

    # Timestamp the request with the lock creation time when a lock exists,
    # otherwise with the rule creation time.
    requested_at = lock.created_at if lock else rule.created_at

    return {
        'dest_rse_id': dest_rse_id,
        'scope': scope,
        'name': name,
        'rule_id': rule.id,
        'attributes': attributes,
        'request_type': request_type,
        'retry_count': retry_count,
        'account': rule.account,
        'requested_at': requested_at,
    }
229
+
230
+
231
@transactional_session
def __apply_rule_to_files_none_grouping(
        datasetfiles: "Sequence[dict[str, Any]]",
        locks: dict[tuple["InternalScope", str], "Sequence[models.ReplicaLock]"],
        replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        source_replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        rseselector: "RSESelector",
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional["Sequence[str]"] = None,
        source_rses: Optional["Sequence[str]"] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply a rule to files with NONE grouping: the RSE selection is made per
    file, so different files of the same dataset may end up on different RSEs.

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source_replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 replicas_to_create, locks_to_create, transfers_to_create
    :raises:                  InsufficientAccountLimit, InsufficientTargetRSEs
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """
    locks_to_create = {}            # {'rse_id': [locks]}
    replicas_to_create = {}         # {'rse_id': [replicas]}
    transfers_to_create = []        # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    preferred_rse_ids = preferred_rse_ids or []
    source_rses = source_rses or []

    for dataset in datasetfiles:
        # RSEs on which at least one file of this dataset got a new lock;
        # used below to create the missing CollectionReplica rows.
        selected_rse_ids = []
        for file in dataset['files']:
            if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
                # Nothing to do as the file already has the requested amount of locks
                continue
            # RSEs that already hold a usable copy of this file and the number
            # of bytes of the file they cover.
            # NOTE(review): the keys are wrapped in str() here, while the ALL
            # and DATASET grouping siblings use the raw rse_id — confirm the
            # selector treats both forms consistently.
            rse_coverage = {str(replica.rse_id): file['bytes'] for replica in replicas[(file['scope'], file['name'])] if replica.state in (ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE)}
            if len(preferred_rse_ids) == 0:
                # No explicit preference: favour RSEs that already cover the file.
                rse_tuples = rseselector.select_rse(size=file['bytes'],
                                                    preferred_rse_ids=rse_coverage.keys(),
                                                    blocklist=[str(replica.rse_id) for replica in replicas[(file['scope'], file['name'])] if replica.state == ReplicaState.BEING_DELETED],
                                                    existing_rse_size=rse_coverage)
            else:
                rse_tuples = rseselector.select_rse(size=file['bytes'],
                                                    preferred_rse_ids=preferred_rse_ids,
                                                    blocklist=[str(replica.rse_id) for replica in replicas[(file['scope'], file['name'])] if replica.state == ReplicaState.BEING_DELETED],
                                                    existing_rse_size=rse_coverage)
            # Each rse_tuple is (rse_id, staging_area, availability_write).
            for rse_tuple in rse_tuples:
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_tuple[0]]) == 1:
                    # Due to a bug a lock could have been already submitted for this, in that case, skip it
                    continue
                __create_lock_and_replica(file=file,
                                          dataset=dataset,
                                          rule=rule,
                                          rse_id=rse_tuple[0],
                                          staging_area=rse_tuple[1],
                                          availability_write=rse_tuple[2],
                                          locks_to_create=locks_to_create,
                                          locks=locks,
                                          source_rses=source_rses,
                                          replicas_to_create=replicas_to_create,
                                          replicas=replicas,
                                          source_replicas=source_replicas,
                                          transfers_to_create=transfers_to_create,
                                          session=session)
                selected_rse_ids.append(rse_tuple[0])
        if dataset['scope'] is not None:
            # Make sure every selected RSE has a CollectionReplica row for the
            # dataset, creating an empty UNAVAILABLE one when it is missing.
            for rse_id in list(set(selected_rse_ids)):
                try:
                    stmt = select(
                        models.CollectionReplica
                    ).where(
                        and_(models.CollectionReplica.scope == dataset['scope'],
                             models.CollectionReplica.name == dataset['name'],
                             models.CollectionReplica.rse_id == rse_id)
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    models.CollectionReplica(scope=dataset['scope'],
                                             name=dataset['name'],
                                             did_type=DIDType.DATASET,
                                             rse_id=rse_id,
                                             bytes=0,
                                             length=0,
                                             available_bytes=0,
                                             available_replicas_cnt=0,
                                             state=ReplicaState.UNAVAILABLE).save(session=session)
                    # Queue the collection replica for the counter-update daemon.
                    models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                    name=dataset['name'],
                                                    did_type=DIDType.DATASET).save(flush=False, session=session)

    return replicas_to_create, locks_to_create, transfers_to_create
330
+
331
+
332
@transactional_session
def __apply_rule_to_files_all_grouping(
        datasetfiles: "Sequence[dict[str, Any]]",
        locks: dict[tuple["InternalScope", str], "Sequence[models.ReplicaLock]"],
        replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        source_replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        rseselector: "RSESelector",
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional["Sequence[str]"] = None,
        source_rses: Optional["Sequence[str]"] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply a rule to files with ALL grouping: a single RSE selection is made
    for the combined size of every file, so all files of all datasets are
    placed together on the same RSE(s).

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source_replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 replicas_to_create, locks_to_create, transfers_to_create
    :raises:                  InsufficientQuota, InsufficientTargetRSEs
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """

    locks_to_create = {}            # {'rse_id': [locks]}
    replicas_to_create = {}         # {'rse_id': [replicas]}
    transfers_to_create = []        # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    preferred_rse_ids = preferred_rse_ids or []
    source_rses = source_rses or []

    # First pass over everything: total size, per-RSE coverage in bytes and
    # the set of RSEs where a replica is currently being deleted (blocklist).
    bytes_ = 0
    rse_coverage = {}  # {'rse_id': coverage }
    blocklist = set()
    for dataset in datasetfiles:
        for file in dataset['files']:
            bytes_ += file['bytes']
            for replica in replicas[(file['scope'], file['name'])]:
                if replica.state == ReplicaState.BEING_DELETED:
                    blocklist.add(replica.rse_id)
                    continue
                if replica.state in [ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE]:
                    if replica.rse_id in rse_coverage:
                        rse_coverage[replica.rse_id] += file['bytes']
                    else:
                        rse_coverage[replica.rse_id] = file['bytes']

    # Single selection for the whole rule; without explicit preferences, the
    # RSEs already covering the most bytes are tried first.
    if not preferred_rse_ids:
        rse_tuples = rseselector.select_rse(size=bytes_,
                                            preferred_rse_ids=[x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)],
                                            blocklist=list(blocklist),
                                            prioritize_order_over_weight=True,
                                            existing_rse_size=rse_coverage)
    else:
        rse_tuples = rseselector.select_rse(size=bytes_,
                                            preferred_rse_ids=preferred_rse_ids,
                                            blocklist=list(blocklist),
                                            existing_rse_size=rse_coverage)
    # Each rse_tuple is (rse_id, staging_area, availability_write).
    for rse_tuple in rse_tuples:
        for dataset in datasetfiles:
            for file in dataset['files']:
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
                    continue
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_tuple[0]]) == 1:
                    # Due to a bug a lock could have been already submitted for this, in that case, skip it
                    continue
                __create_lock_and_replica(file=file,
                                          dataset=dataset,
                                          rule=rule,
                                          rse_id=rse_tuple[0],
                                          staging_area=rse_tuple[1],
                                          availability_write=rse_tuple[2],
                                          locks_to_create=locks_to_create,
                                          locks=locks,
                                          source_rses=source_rses,
                                          replicas_to_create=replicas_to_create,
                                          replicas=replicas,
                                          source_replicas=source_replicas,
                                          transfers_to_create=transfers_to_create,
                                          session=session)
            # Add a DatasetLock to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.DatasetLock
                    ).where(
                        and_(models.DatasetLock.scope == dataset['scope'],
                             models.DatasetLock.name == dataset['name'],
                             models.DatasetLock.rule_id == rule.id,
                             models.DatasetLock.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    # Get dataset Information
                    # NOTE(review): this re-binds bytes_, clobbering the total
                    # computed above (harmless, the selection is already done);
                    # the DATASET-grouping sibling defaults to (True, None, None)
                    # here instead of (True, 0, 0) — confirm which is intended.
                    is_open, bytes_, length = True, 0, 0
                    try:
                        stmt = select(
                            models.DataIdentifier.is_open,
                            models.DataIdentifier.bytes,
                            models.DataIdentifier.length
                        ).where(
                            and_(models.DataIdentifier.scope == dataset['scope'],
                                 models.DataIdentifier.name == dataset['name'])
                        )
                        is_open, bytes_, length = session.execute(stmt).one()
                    except NoResultFound:
                        pass

                    # Only closed datasets get a fixed length/bytes on the lock.
                    models.DatasetLock(scope=dataset['scope'],
                                       name=dataset['name'],
                                       rule_id=rule.id,
                                       rse_id=rse_tuple[0],
                                       state=LockState.REPLICATING,
                                       account=rule.account,
                                       length=length if not is_open else None,
                                       bytes=bytes_ if not is_open else None).save(flush=False, session=session)
            # Add a Dataset Replica to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.CollectionReplica
                    ).where(
                        and_(models.CollectionReplica.scope == dataset['scope'],
                             models.CollectionReplica.name == dataset['name'],
                             models.CollectionReplica.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    models.CollectionReplica(scope=dataset['scope'],
                                             name=dataset['name'],
                                             did_type=DIDType.DATASET,
                                             rse_id=rse_tuple[0],
                                             bytes=0,
                                             length=0,
                                             available_bytes=0,
                                             available_replicas_cnt=0,
                                             state=ReplicaState.UNAVAILABLE).save(session=session)
                    # Queue the collection replica for the counter-update daemon.
                    models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                    name=dataset['name'],
                                                    did_type=DIDType.DATASET).save(flush=False, session=session)

    return replicas_to_create, locks_to_create, transfers_to_create
481
+
482
+
483
@transactional_session
def __apply_rule_to_files_dataset_grouping(
        datasetfiles: "Sequence[dict[str, Any]]",
        locks: dict[tuple["InternalScope", str], "Sequence[models.ReplicaLock]"],
        replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        source_replicas: dict[tuple["InternalScope", str], "Sequence[models.CollectionReplica]"],
        rseselector: "RSESelector",
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional["Sequence[str]"] = None,
        source_rses: Optional["Sequence[str]"] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply a rule to files with DATASET grouping: one RSE selection is made per
    dataset, so all files of a dataset stay together but different datasets
    may be placed on different RSEs.

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 replicas_to_create, locks_to_create, transfers_to_create
    :raises:                  InsufficientQuota, InsufficientTargetRSEs
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """
    locks_to_create = {}            # {'rse_id': [locks]}
    replicas_to_create = {}         # {'rse_id': [replicas]}
    transfers_to_create = []        # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    preferred_rse_ids = preferred_rse_ids or []
    source_rses = source_rses or []

    for dataset in datasetfiles:
        # Per-dataset totals: combined size, per-RSE coverage in bytes and the
        # RSEs where a replica is currently being deleted (blocklist).
        bytes_ = sum([file['bytes'] for file in dataset['files']])
        rse_coverage = {}  # {'rse_id': coverage }
        blocklist = set()
        for file in dataset['files']:
            for replica in replicas[(file['scope'], file['name'])]:
                if replica.state == ReplicaState.BEING_DELETED:
                    blocklist.add(replica.rse_id)
                    continue
                if replica.state in [ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE]:
                    if replica.rse_id in rse_coverage:
                        rse_coverage[replica.rse_id] += file['bytes']
                    else:
                        rse_coverage[replica.rse_id] = file['bytes']

        # One selection per dataset; without explicit preferences the RSEs
        # already covering the most bytes of the dataset are tried first.
        if not preferred_rse_ids:
            rse_tuples = rseselector.select_rse(size=bytes_,
                                                preferred_rse_ids=[x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)],
                                                blocklist=list(blocklist),
                                                prioritize_order_over_weight=True,
                                                existing_rse_size=rse_coverage)
        else:
            rse_tuples = rseselector.select_rse(size=bytes_,
                                                preferred_rse_ids=preferred_rse_ids,
                                                blocklist=list(blocklist),
                                                existing_rse_size=rse_coverage)
        # Each rse_tuple is (rse_id, staging_area, availability_write).
        for rse_tuple in rse_tuples:
            for file in dataset['files']:
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
                    continue
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_tuple[0]]) == 1:
                    # Due to a bug a lock could have been already submitted for this, in that case, skip it
                    continue
                __create_lock_and_replica(file=file,
                                          dataset=dataset,
                                          rule=rule,
                                          rse_id=rse_tuple[0],
                                          staging_area=rse_tuple[1],
                                          availability_write=rse_tuple[2],
                                          locks_to_create=locks_to_create,
                                          locks=locks,
                                          source_rses=source_rses,
                                          replicas_to_create=replicas_to_create,
                                          replicas=replicas,
                                          source_replicas=source_replicas,
                                          transfers_to_create=transfers_to_create,
                                          session=session)
            # Add a DatasetLock to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.DatasetLock
                    ).where(
                        and_(models.DatasetLock.scope == dataset['scope'],
                             models.DatasetLock.name == dataset['name'],
                             models.DatasetLock.rule_id == rule.id,
                             models.DatasetLock.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    # Get dataset Information
                    # NOTE(review): this re-binds bytes_, clobbering the dataset
                    # total computed above (harmless, the selection is already
                    # done for this dataset).
                    is_open, bytes_, length = True, None, None
                    try:
                        stmt = select(
                            models.DataIdentifier.is_open,
                            models.DataIdentifier.bytes,
                            models.DataIdentifier.length
                        ).where(
                            and_(models.DataIdentifier.scope == dataset['scope'],
                                 models.DataIdentifier.name == dataset['name'])
                        )
                        is_open, bytes_, length = session.execute(stmt).one()
                    except NoResultFound:
                        pass

                    # Only closed datasets get a fixed length/bytes on the lock.
                    models.DatasetLock(scope=dataset['scope'],
                                       name=dataset['name'],
                                       rule_id=rule.id,
                                       rse_id=rse_tuple[0],
                                       state=LockState.REPLICATING,
                                       account=rule.account,
                                       length=length if not is_open else None,
                                       bytes=bytes_ if not is_open else None).save(flush=False, session=session)

            # Add a Dataset Replica to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.CollectionReplica
                    ).where(
                        and_(models.CollectionReplica.scope == dataset['scope'],
                             models.CollectionReplica.name == dataset['name'],
                             models.CollectionReplica.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    models.CollectionReplica(scope=dataset['scope'],
                                             name=dataset['name'],
                                             did_type=DIDType.DATASET,
                                             rse_id=rse_tuple[0],
                                             bytes=0,
                                             length=0,
                                             available_bytes=0,
                                             available_replicas_cnt=0,
                                             state=ReplicaState.UNAVAILABLE).save(session=session)
                    # Queue the collection replica for the counter-update daemon.
                    models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                    name=dataset['name'],
                                                    did_type=DIDType.DATASET).save(flush=False, session=session)

    return replicas_to_create, locks_to_create, transfers_to_create
630
+
631
+
632
@transactional_session
def __repair_stuck_locks_with_none_grouping(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, *, session: "Session", logger=logging.log):
    """
    Repair the STUCK locks of a rule with NONE grouping: each stuck lock is
    either deleted (too many locks already), promoted to OK (replica became
    available), re-transferred in place, or moved to a freshly selected RSE.

    :param datasetfiles:    Dict holding all datasets and files.
    :param locks:           Dict holding all locks.
    :param replicas:        Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector:     The RSESelector to be used.
    :param rule:            The rule object.
    :param source_rses:     RSE ids of eligible source replicas.
    :param session:         Session of the db.
    :param logger:          Optional decorated logger that can be passed from the calling daemons or servers.
    :returns:               replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
    :raises:                InsufficientAccountLimit, InsufficientTargetRSEs
    :attention:             This method modifies the contents of the locks and replicas input parameters,
                            and mutates the rule's locks_stuck_cnt/locks_ok_cnt counters in place.
    """

    locks_to_create = {}            # {'rse_id': [locks]}
    replicas_to_create = {}         # {'rse_id': [replicas]}
    transfers_to_create = []        # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    locks_to_delete = {}            # {'rse_id': [locks]}

    # RSE descriptions known to the selector; used to check availability_write.
    selector_rse_dict = rseselector.get_rse_dictionary()

    # Iterate the datasetfiles structure and search for stuck locks
    for dataset in datasetfiles:
        for file in dataset['files']:
            # Iterate and try to repair STUCK locks
            for lock in [stucked_lock for stucked_lock in locks[(file['scope'], file['name'])] if stucked_lock.rule_id == rule.id and stucked_lock.state == LockState.STUCK]:
                # Check if there are actually already enough locks
                if len([good_lock for good_lock in locks[(file['scope'], file['name'])] if good_lock.rule_id == rule.id and good_lock.state != LockState.STUCK]) >= rule.copies:
                    # Remove the lock
                    logger(logging.DEBUG, 'There are too many locks for %s:%s for rule %s. Deleting lock', file['scope'], file['name'], str(rule.id))
                    if lock.rse_id in locks_to_delete:
                        locks_to_delete[lock.rse_id].append(lock)
                    else:
                        locks_to_delete[lock.rse_id] = [lock]
                    rule.locks_stuck_cnt -= 1
                    continue
                # Check if the replica is AVAILABLE now
                if [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id]:
                    # Promote the stuck lock and move the counters accordingly.
                    lock.state = LockState.OK
                    rule.locks_stuck_cnt -= 1
                    rule.locks_ok_cnt += 1
                    # Recalculate the replica_lock_cnt
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id][0]
                    # Clearing the tombstone protects the replica from deletion.
                    associated_replica.tombstone = None
                    stmt = select(
                        func.count(models.ReplicaLock.rule_id)
                    ).select_from(
                        models.ReplicaLock
                    ).where(
                        and_(models.ReplicaLock.scope == associated_replica.scope,
                             models.ReplicaLock.name == associated_replica.name,
                             models.ReplicaLock.rse_id == lock.rse_id)
                    )
                    associated_replica.lock_cnt = session.execute(stmt).scalar_one()
                    continue
                # Check if this is a STUCK lock due to source_replica filtering
                if source_rses:
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                    # Check if there is an eligible source replica for this lock
                    if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        # Retry the transfer to the same destination RSE.
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)
                else:
                    # Try to move the lock to a different RSE; RSEs already
                    # holding a lock of this rule are excluded, as is the
                    # current (stuck) destination.
                    blocklist_rses = [bl_lock.rse_id for bl_lock in locks[(file['scope'], file['name'])] if bl_lock.rule_id == rule.id]
                    try:
                        rse_coverage = {replica.rse_id: file['bytes'] for replica in replicas[(file['scope'], file['name'])] if replica.state in (ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE)}
                        rse_tuples = rseselector.select_rse(size=file['bytes'],
                                                            preferred_rse_ids=rse_coverage.keys(),
                                                            copies=1,
                                                            blocklist=[replica.rse_id for replica in replicas[(file['scope'], file['name'])] if replica.state == ReplicaState.BEING_DELETED] + blocklist_rses + [lock.rse_id],
                                                            existing_rse_size=rse_coverage)
                        for rse_tuple in rse_tuples:
                            __create_lock_and_replica(file=file,
                                                      dataset=dataset,
                                                      rule=rule,
                                                      rse_id=rse_tuple[0],
                                                      staging_area=rse_tuple[1],
                                                      availability_write=rse_tuple[2],
                                                      locks_to_create=locks_to_create,
                                                      locks=locks,
                                                      source_rses=source_rses,
                                                      replicas_to_create=replicas_to_create,
                                                      replicas=replicas,
                                                      source_replicas=source_replicas,
                                                      transfers_to_create=transfers_to_create,
                                                      session=session)
                            rule.locks_stuck_cnt -= 1
                            # Release the old replica and schedule the old lock
                            # for deletion now that a replacement exists.
                            __set_replica_unavailable(replica=[replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0],
                                                      session=session)
                            if lock.rse_id in locks_to_delete:
                                locks_to_delete[lock.rse_id].append(lock)
                            else:
                                locks_to_delete[lock.rse_id] = [lock]
                    except InsufficientTargetRSEs:
                        # Just retry the already existing lock
                        if __is_retry_required(lock=lock, activity=rule.activity) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                            associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                            __update_lock_replica_and_create_transfer(lock=lock,
                                                                      replica=associated_replica,
                                                                      rule=rule,
                                                                      dataset=dataset,
                                                                      transfers_to_create=transfers_to_create,
                                                                      session=session)

    return replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
746
+
747
+
748
@transactional_session
def __repair_stuck_locks_with_all_grouping(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, *, session: "Session", logger=logging.log):
    """
    Repair the STUCK locks of a rule with ALL grouping.

    :param datasetfiles: Dict holding all datasets and files.
    :param locks: Dict holding all locks.
    :param replicas: Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector: The RSESelector to be used.
    :param rule: The rule object.
    :param source_rses: RSE ids of eligible source replicas.
    :param session: Session of the db.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
    :raises: InsufficientAccountLimit, InsufficientTargetRSEs
    :attention: This method modifies the contents of the locks and replicas input parameters.
    """

    locks_to_create = {}            # {'rse_id': [locks]}
    replicas_to_create = {}         # {'rse_id': [replicas]}
    transfers_to_create = []        # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    locks_to_delete = {}            # {'rse_id': [locks]}

    selector_rse_dict = rseselector.get_rse_dictionary()

    # Iterate the datasetfiles structure and search for stuck locks
    for dataset in datasetfiles:
        for file in dataset['files']:
            # Iterate and try to repair STUCK locks
            for lock in [stucked_lock for stucked_lock in locks[(file['scope'], file['name'])] if stucked_lock.rule_id == rule.id and stucked_lock.state == LockState.STUCK]:
                # Check if there are actually already enough locks
                if len([good_lock for good_lock in locks[(file['scope'], file['name'])] if good_lock.rule_id == rule.id and good_lock.state != LockState.STUCK]) >= rule.copies:
                    # Remove the lock
                    logger(logging.DEBUG, 'There are too many locks for %s:%s for rule %s. Deleting lock', file['scope'], file['name'], str(rule.id))
                    if lock.rse_id in locks_to_delete:
                        locks_to_delete[lock.rse_id].append(lock)
                    else:
                        locks_to_delete[lock.rse_id] = [lock]
                    rule.locks_stuck_cnt -= 1
                    continue
                # Check if the replica is AVAILABLE now
                if [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id]:
                    lock.state = LockState.OK
                    rule.locks_stuck_cnt -= 1
                    rule.locks_ok_cnt += 1
                    # Recalculate the replica_lock_cnt from the DB, since the
                    # in-memory counter may have drifted while the lock was stuck
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id][0]
                    associated_replica.tombstone = None
                    stmt = select(
                        func.count(models.ReplicaLock.rule_id)
                    ).select_from(
                        models.ReplicaLock
                    ).where(
                        and_(models.ReplicaLock.scope == associated_replica.scope,
                             models.ReplicaLock.name == associated_replica.name,
                             models.ReplicaLock.rse_id == lock.rse_id)
                    )
                    associated_replica.lock_cnt = session.execute(stmt).scalar_one()
                    continue
                # Check if this is a STUCK lock due to source_replica filtering
                if source_rses:
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                    # Check if there is an eligible source replica for this lock;
                    # only re-issue the transfer if the destination is writable
                    # (or the rule ignores availability)
                    if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)
                else:
                    # Just retry the already existing lock
                    if __is_retry_required(lock=lock, activity=rule.activity) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)

    return replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
831
+
832
+
833
@transactional_session
def __repair_stuck_locks_with_dataset_grouping(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, *, session: "Session", logger=logging.log):
    """
    Repair the STUCK locks of a rule with DATASET grouping.

    :param datasetfiles: Dict holding all datasets and files.
    :param locks: Dict holding all locks.
    :param replicas: Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector: The RSESelector to be used.
    :param rule: The rule object.
    :param source_rses: RSE ids of eligible source replicas.
    :param session: Session of the db.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
    :raises: InsufficientAccountLimit, InsufficientTargetRSEs
    :attention: This method modifies the contents of the locks and replicas input parameters.
    """

    locks_to_create = {}       # {'rse_id': [locks]}
    replicas_to_create = {}    # {'rse_id': [replicas]}
    transfers_to_create = []   # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    locks_to_delete = {}       # {'rse_id': [locks]}

    selector_rse_dict = rseselector.get_rse_dictionary()

    # Walk over every file of every dataset and look at its STUCK locks
    for ds in datasetfiles:
        for fl in ds['files']:
            did_key = (fl['scope'], fl['name'])
            file_locks = locks[did_key]
            file_replicas = replicas[did_key]
            for stuck_lock in [candidate for candidate in file_locks if candidate.rule_id == rule.id and candidate.state == LockState.STUCK]:
                # Enough healthy (non-STUCK) locks already? Then this one is surplus.
                healthy = [lk for lk in file_locks if lk.rule_id == rule.id and lk.state != LockState.STUCK]
                if len(healthy) >= rule.copies:
                    logger(logging.DEBUG, 'There are too many locks for %s:%s for rule %s. Deleting lock', fl['scope'], fl['name'], str(rule.id))
                    locks_to_delete.setdefault(stuck_lock.rse_id, []).append(stuck_lock)
                    rule.locks_stuck_cnt -= 1
                    continue

                # Did the replica become usable in the meantime?
                usable = [rep for rep in file_replicas
                          if rep.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and rep.rse_id == stuck_lock.rse_id]
                if usable:
                    stuck_lock.state = LockState.OK
                    rule.locks_stuck_cnt -= 1
                    rule.locks_ok_cnt += 1
                    # Refresh the replica's lock count from the database
                    target_replica = usable[0]
                    target_replica.tombstone = None
                    count_stmt = select(
                        func.count(models.ReplicaLock.rule_id)
                    ).select_from(
                        models.ReplicaLock
                    ).where(
                        and_(models.ReplicaLock.scope == target_replica.scope,
                             models.ReplicaLock.name == target_replica.name,
                             models.ReplicaLock.rse_id == stuck_lock.rse_id)
                    )
                    target_replica.lock_cnt = session.execute(count_stmt).scalar_one()
                    continue

                # Destination must be writable unless the rule ignores availability
                dest_writable = selector_rse_dict.get(stuck_lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability

                if source_rses:
                    # STUCK possibly because of source_replica filtering: retry
                    # only when an eligible source exists now
                    target_replica = [rep for rep in file_replicas if rep.rse_id == stuck_lock.rse_id][0]
                    if set(source_replicas.get(did_key, [])).intersection(source_rses) and dest_writable:
                        __update_lock_replica_and_create_transfer(lock=stuck_lock,
                                                                  replica=target_replica,
                                                                  rule=rule,
                                                                  dataset=ds,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)
                else:
                    # Plain retry of the existing lock when the backoff allows it
                    if __is_retry_required(lock=stuck_lock, activity=rule.activity) and dest_writable:
                        target_replica = [rep for rep in file_replicas if rep.rse_id == stuck_lock.rse_id][0]
                        __update_lock_replica_and_create_transfer(lock=stuck_lock,
                                                                  replica=target_replica,
                                                                  rule=rule,
                                                                  dataset=ds,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)

    return replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
916
+
917
+
918
def __is_retry_required(lock, activity):
    """
    Decide whether a STUCK lock is due for another transfer attempt.

    Retry schedule by lock age (time since creation):
      * first day:            every 2 hours
      * second day:           every 4 hours
      * third day:            every 6 hours
      * fourth day and later: every 8 hours
    'Express' rules retry every 2 hours regardless of age and
    'DebugJudge' rules always retry.

    :param lock: The lock to check (needs created_at and updated_at datetimes).
    :param activity: The activity of the rule.
    :returns: True if a retry is due, False otherwise.
    """

    # Take a single timestamp so both age computations are consistent;
    # previously utcnow() was called four times, so the two deltas could
    # straddle a tick.
    now = datetime.utcnow()
    created_at_diff = (now - lock.created_at).days * 24 * 3600 + (now - lock.created_at).seconds
    updated_at_diff = (now - lock.updated_at).days * 24 * 3600 + (now - lock.updated_at).seconds

    if activity == 'Express':
        if updated_at_diff > 3600 * 2:
            return True
    elif activity == 'DebugJudge':
        return True
    elif created_at_diff < 24 * 3600:  # First Day: retry every 2 hours
        if updated_at_diff > 3600 * 2:
            return True
    elif created_at_diff < 2 * 24 * 3600:  # Second Day: retry every 4 hours
        if updated_at_diff > 3600 * 4:
            return True
    elif created_at_diff < 3 * 24 * 3600:  # Third Day: retry every 6 hours
        if updated_at_diff > 3600 * 6:
            return True
    else:  # Four and more days: retry every 8 hours
        if updated_at_diff > 3600 * 8:
            return True
    return False
948
+
949
+
950
@transactional_session
def __create_lock_and_replica(file, dataset, rule, rse_id, staging_area, availability_write, locks_to_create, locks, source_rses, replicas_to_create, replicas, source_replicas, transfers_to_create, *, session: "Session", logger=logging.log):
    """
    This method creates a lock and if necessary a new replica and fills the corresponding dictionaries.

    :param file: File dictionary holding the file information.
    :param dataset: Dataset dictionary holding the dataset information.
    :param rule: Rule object.
    :param rse_id: RSE id the lock and replica should be created at.
    :param staging_area: Boolean variable if the RSE is a staging area.
    :param availability_write: Boolean variable if the RSE is write enabled.
    :param locks_to_create: Dictionary of the locks to create.
    :param locks: Dictionary of all locks.
    :param source_rses: RSE ids of eligible source replicas.
    :param replicas_to_create: Dictionary of the replicas to create.
    :param replicas: Dictionary of the replicas.
    :param source_replicas: Dictionary of the source replicas.
    :param transfers_to_create: List of transfers to create.
    :param session: The db session in use.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: True, if the created lock is replicating, False otherwise.
    :attention: This method modifies the contents of the locks, locks_to_create, replicas_to_create and replicas input parameters.
    """

    # Pin lifetime: for rules with an expiration date the pin must not outlive
    # the rule itself.
    if rule.expires_at:
        copy_pin_lifetime = rule.expires_at - datetime.utcnow()
        copy_pin_lifetime = copy_pin_lifetime.seconds + copy_pin_lifetime.days * 24 * 3600
    else:
        copy_pin_lifetime = None

    # If it is a Staging Area, the pin has to be extended
    if staging_area:
        transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                        request_type=RequestType.STAGEIN,
                                                        scope=file['scope'],
                                                        name=file['name'],
                                                        rule=rule,
                                                        bytes_=file['bytes'],
                                                        md5=file['md5'],
                                                        adler32=file['adler32'],
                                                        ds_scope=dataset['scope'],
                                                        ds_name=dataset['name'],
                                                        copy_pin_lifetime=copy_pin_lifetime,
                                                        session=session))

    # If staging_required type RSE then set pin to RSE attribute maximum_pin_lifetime
    staging_required = get_rse_attribute(rse_id, RseAttr.STAGING_REQUIRED, session=session)
    maximum_pin_lifetime = get_rse_attribute(rse_id, RseAttr.MAXIMUM_PIN_LIFETIME, session=session)

    if staging_required:
        # NOTE(review): maximum_pin_lifetime is assigned exactly as returned by
        # get_rse_attribute (it is cast to int only inside the comparison) --
        # confirm downstream consumers accept a non-int value here.
        if (not copy_pin_lifetime and maximum_pin_lifetime) or (copy_pin_lifetime and maximum_pin_lifetime and copy_pin_lifetime < int(maximum_pin_lifetime)):
            copy_pin_lifetime = maximum_pin_lifetime
        rse_name = get_rse_name(rse_id=rse_id, session=session)
        logger(logging.DEBUG, f'Destination RSE {rse_name} is type staging_required with pin value: {copy_pin_lifetime}')

    existing_replicas = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == rse_id]

    if existing_replicas:  # A replica already exists (But could be UNAVAILABLE)
        existing_replica = existing_replicas[0]

        # Replica is fully available -- AVAILABLE
        if existing_replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE]:
            new_lock = __create_lock(rule=rule,
                                     rse_id=rse_id,
                                     scope=file['scope'],
                                     name=file['name'],
                                     bytes_=file['bytes'],
                                     existing_replica=existing_replica,
                                     state=LockState.OK if not staging_required else LockState.REPLICATING)
            if rse_id not in locks_to_create:
                locks_to_create[rse_id] = []
            locks_to_create[rse_id].append(new_lock)
            locks[(file['scope'], file['name'])].append(new_lock)
            if not staging_required:
                return False

            # staging_required RSE: even an already AVAILABLE replica needs a
            # STAGEIN request to (re-)pin it.
            # NOTE(review): this path has no explicit return, so it yields None
            # (falsy) -- confirm callers treat that like False.
            transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                            request_type=RequestType.STAGEIN,
                                                            scope=file['scope'],
                                                            name=file['name'],
                                                            rule=rule,
                                                            lock=new_lock,
                                                            bytes_=file['bytes'],
                                                            md5=file['md5'],
                                                            adler32=file['adler32'],
                                                            ds_scope=dataset['scope'],
                                                            ds_name=dataset['name'],
                                                            copy_pin_lifetime=copy_pin_lifetime,
                                                            session=session))

        # Replica is not available -- UNAVAILABLE
        elif existing_replica.state == ReplicaState.UNAVAILABLE:
            available_source_replica = True
            if source_rses:
                available_source_replica = False
                # Check if there is an eligible source replica for this lock
                if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses):
                    available_source_replica = True
            # Without a usable source or a writable destination the lock is STUCK
            new_lock = __create_lock(rule=rule,
                                     rse_id=rse_id,
                                     scope=file['scope'],
                                     name=file['name'],
                                     bytes_=file['bytes'],
                                     existing_replica=existing_replica,
                                     state=LockState.REPLICATING if (available_source_replica and availability_write) else LockState.STUCK)
            if rse_id not in locks_to_create:
                locks_to_create[rse_id] = []
            locks_to_create[rse_id].append(new_lock)
            locks[(file['scope'], file['name'])].append(new_lock)
            if not staging_area and not staging_required and available_source_replica and availability_write:
                transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                                request_type=RequestType.TRANSFER,
                                                                scope=file['scope'],
                                                                name=file['name'],
                                                                rule=rule,
                                                                lock=new_lock,
                                                                bytes_=file['bytes'],
                                                                md5=file['md5'],
                                                                adler32=file['adler32'],
                                                                ds_scope=dataset['scope'],
                                                                ds_name=dataset['name'],
                                                                session=session))
                return True
            return False
        # Replica is not available at the rse yet -- COPYING
        else:
            # A transfer is already on the way; just piggy-back a lock on it
            new_lock = __create_lock(rule=rule,
                                     rse_id=rse_id,
                                     scope=file['scope'],
                                     name=file['name'],
                                     bytes_=file['bytes'],
                                     existing_replica=existing_replica,
                                     state=LockState.REPLICATING)
            if rse_id not in locks_to_create:
                locks_to_create[rse_id] = []
            locks_to_create[rse_id].append(new_lock)
            locks[(file['scope'], file['name'])].append(new_lock)
            return True
    else:  # Replica has to be created
        available_source_replica = True
        if source_rses:
            available_source_replica = False
            # Check if there is an eligible source replica for this lock
            if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses):
                available_source_replica = True

        new_replica = __create_replica(rse_id=rse_id,
                                       scope=file['scope'],
                                       name=file['name'],
                                       bytes_=file['bytes'],
                                       md5=file['md5'],
                                       adler32=file['adler32'],
                                       state=ReplicaState.COPYING if (available_source_replica and availability_write) else ReplicaState.UNAVAILABLE)
        if rse_id not in replicas_to_create:
            replicas_to_create[rse_id] = []
        replicas_to_create[rse_id].append(new_replica)
        replicas[(file['scope'], file['name'])].append(new_replica)

        new_lock = __create_lock(rule=rule,
                                 rse_id=rse_id,
                                 scope=file['scope'],
                                 name=file['name'],
                                 bytes_=file['bytes'],
                                 existing_replica=new_replica,
                                 state=LockState.REPLICATING if (available_source_replica and availability_write) else LockState.STUCK)
        if rse_id not in locks_to_create:
            locks_to_create[rse_id] = []
        locks_to_create[rse_id].append(new_lock)
        locks[(file['scope'], file['name'])].append(new_lock)

        if not staging_area and not staging_required and available_source_replica and availability_write:
            transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                            request_type=RequestType.TRANSFER,
                                                            scope=file['scope'],
                                                            name=file['name'],
                                                            rule=rule,
                                                            lock=new_lock,
                                                            bytes_=file['bytes'],
                                                            md5=file['md5'],
                                                            adler32=file['adler32'],
                                                            ds_scope=dataset['scope'],
                                                            ds_name=dataset['name'],
                                                            session=session))
            return True
        elif staging_required:
            # NOTE(review): this staging_required branch issues a TRANSFER
            # request (not STAGEIN) -- confirm that is intended.
            transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                            request_type=RequestType.TRANSFER,
                                                            scope=file['scope'],
                                                            name=file['name'],
                                                            rule=rule,
                                                            lock=new_lock,
                                                            bytes_=file['bytes'],
                                                            md5=file['md5'],
                                                            adler32=file['adler32'],
                                                            ds_scope=dataset['scope'],
                                                            ds_name=dataset['name'],
                                                            session=session))
            return True
        return False
1149
+
1150
+
1151
def __create_lock(rule, rse_id, scope, name, bytes_, state, existing_replica, logger=logging.log):
    """
    Create and return a new SQLAlchemy Lock object.

    Besides building the lock, this bumps the replica bookkeeping
    (lock_cnt, tombstone, state) and the rule counters
    (locks_ok_cnt / locks_replicating_cnt / locks_stuck_cnt)
    according to the requested lock state.

    :param rule: The SQLAlchemy rule object.
    :param rse_id: The rse_id of the lock.
    :param scope: The scope of the lock.
    :param name: The name of the lock.
    :param bytes_: Bytes of the lock.
    :param state: State of the lock.
    :param existing_replica: Replica object.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: The new (not yet persisted) models.ReplicaLock object.
    """

    new_lock = models.ReplicaLock(rule_id=rule.id,
                                  rse_id=rse_id,
                                  scope=scope,
                                  name=name,
                                  account=rule.account,
                                  bytes=bytes_,
                                  state=state)
    if state == LockState.OK:
        # The lock pins the replica: bump refcount, clear a possible tombstone
        existing_replica.lock_cnt += 1
        existing_replica.tombstone = None
        rule.locks_ok_cnt += 1
        logger(logging.DEBUG, 'Creating OK Lock %s:%s on %s for rule %s', scope, name, rse_id, str(rule.id))
    elif state == LockState.REPLICATING:
        # The replica still has to be transferred to this RSE
        existing_replica.state = ReplicaState.COPYING
        existing_replica.lock_cnt += 1
        existing_replica.tombstone = None
        rule.locks_replicating_cnt += 1
        # Bug fix: the arguments were previously logged as (scope, rse_id, name),
        # producing a garbled 'scope:rse_id on name' message; now scope:name on rse_id
        # like the OK and STUCK branches.
        logger(logging.DEBUG, 'Creating REPLICATING Lock %s:%s on %s for rule %s', scope, name, rse_id, str(rule.id))
    elif state == LockState.STUCK:
        existing_replica.lock_cnt += 1
        existing_replica.tombstone = None
        rule.locks_stuck_cnt += 1
        logger(logging.DEBUG, 'Creating STUCK Lock %s:%s on %s for rule %s', scope, name, rse_id, str(rule.id))
    return new_lock
1189
+
1190
+
1191
def __create_replica(rse_id, scope, name, bytes_, state, md5, adler32, logger=logging.log):
    """
    Build and return a new (not yet persisted) SQLAlchemy replica object.

    :param rse_id: RSE id of the replica.
    :param scope: Scope of the replica.
    :param name: Name of the replica.
    :param bytes_: Bytes of the replica.
    :param state: State of the replica.
    :param md5: MD5 checksum of the replica.
    :param adler32: ADLER32 checksum of the replica.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    """

    # Fresh replicas start with no locks and no tombstone; lock bookkeeping is
    # handled separately by __create_lock.
    replica = models.RSEFileAssociation(rse_id=rse_id,
                                        scope=scope,
                                        name=name,
                                        bytes=bytes_,
                                        md5=md5,
                                        adler32=adler32,
                                        tombstone=None,
                                        state=state,
                                        lock_cnt=0)
    logger(logging.DEBUG, 'Creating %s replica for %s:%s on %s', state, scope, name, rse_id)
    return replica
1216
+
1217
+
1218
@transactional_session
def __update_lock_replica_and_create_transfer(lock, replica, rule, dataset, transfers_to_create, *, session: "Session", logger=logging.log):
    """
    This method updates a lock and replica and fills the corresponding dictionaries.

    :param lock: The lock to update.
    :param replica: The replica to update.
    :param rule: Rule to update.
    :param dataset: Dataset dictionary holding the dataset information.
    :param transfers_to_create: List of transfers to create.
    :param session: The db session in use.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :attention: This method modifies the contents of the transfers_to_create input parameters.
    """

    logger(logging.DEBUG, 'Updating Lock %s:%s for rule %s', lock.scope, lock.name, str(rule.id))
    # Move the lock from STUCK back to REPLICATING and adjust the rule counters
    lock.state = LockState.REPLICATING
    rule.locks_stuck_cnt -= 1
    rule.locks_replicating_cnt += 1
    replica.state = ReplicaState.COPYING

    # Count how often this lock has been repaired (repair_cnt may start as None)
    if not lock.repair_cnt:
        lock.repair_cnt = 1
    else:
        lock.repair_cnt += 1

    if get_rse(rse_id=lock.rse_id, session=session)['staging_area']:
        # Staging area: re-issue a STAGEIN pinned until the rule expires.
        # NOTE(review): assumes rule.expires_at is set for staging-area rules
        # (it would raise a TypeError otherwise) -- confirm.
        copy_pin_lifetime = rule.expires_at - datetime.utcnow()
        copy_pin_lifetime = copy_pin_lifetime.seconds + copy_pin_lifetime.days * 24 * 3600
        transfers_to_create.append(create_transfer_dict(dest_rse_id=lock.rse_id,
                                                        scope=lock.scope,
                                                        name=lock.name,
                                                        rule=rule,
                                                        lock=lock,
                                                        bytes_=replica.bytes,
                                                        md5=replica.md5,
                                                        adler32=replica.adler32,
                                                        ds_scope=dataset['scope'],
                                                        ds_name=dataset['name'],
                                                        copy_pin_lifetime=copy_pin_lifetime,
                                                        request_type=RequestType.STAGEIN,
                                                        session=session))
    else:
        # Regular RSE: re-issue an ordinary transfer request
        transfers_to_create.append(create_transfer_dict(dest_rse_id=lock.rse_id,
                                                        scope=lock.scope,
                                                        name=lock.name,
                                                        rule=rule,
                                                        lock=lock,
                                                        bytes_=replica.bytes,
                                                        md5=replica.md5,
                                                        adler32=replica.adler32,
                                                        ds_scope=dataset['scope'],
                                                        ds_name=dataset['name'],
                                                        request_type=RequestType.TRANSFER,
                                                        retry_count=1,
                                                        session=session))
1274
+
1275
+
1276
@transactional_session
def __set_replica_unavailable(replica, *, session: "Session"):
    """
    Mark a replica as UNAVAILABLE, dropping one lock reference.

    :param replica: The replica to update.
    :param session: The db session in use.
    """

    replica.lock_cnt -= 1
    if not replica.lock_cnt:
        # No locks left: tombstone the replica so the reaper may delete it
        replica.tombstone = OBSOLETE
    replica.state = ReplicaState.UNAVAILABLE
1289
+
1290
+
1291
+ # # debug helper functions used in apply_rule
1292
+ #
1293
+ # def prnt(x, header=None):
1294
+ # print()
1295
+ # if header:
1296
+ # print(header)
1297
+ # if isinstance(x, list) and len(x):
1298
+ # for elem in x:
1299
+ # print(' ', elem)
1300
+ # elif isinstance(x, dict) and len(x) and isinstance(x.values()[0], list):
1301
+ # for k, v in x.items():
1302
+ # if isinstance(v,list) and len(v):
1303
+ # print(' ', k, ':')
1304
+ # for elem in v:
1305
+ # print(' ', elem)
1306
+ # else:
1307
+ # print(' ', k, ':', v)
1308
+ # else:
1309
+ # print(x)
1310
+ #
1311
+ # import os
1312
+ # def mem():
1313
+ # # start your debug python session with harmless -R option to easily grep it out
1314
+ # os.system("ps -U root -o pid,user,rss:10,vsz:10,args:100 | grep 'python -R' | grep -v bin | grep -v grep")
1315
+
1316
+
1317
+ @transactional_session
1318
+ def apply_rule(did, rule, rses, source_rses, rseselector, *, session: "Session", logger=logging.log):
1319
+ """
1320
+ Apply a replication rule to one did.
1321
+
1322
+ :param did: the did object
1323
+ :param rule: the rule object
1324
+ :param rses: target rses_ids
1325
+ :param source_rses: source rses_ids
1326
+ :param rseselector: the rseselector object
1327
+ :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
1328
+ :param session: the database session in use
1329
+ """
1330
+
1331
+ max_partition_size = config_get_int('rules', 'apply_rule_max_partition_size', default=2000, session=session) # process dataset files in bunches of max this size
1332
+
1333
+ # accounting counters
1334
+ rse_counters_files = {}
1335
+ rse_counters_bytes = {}
1336
+ account_counters_files = {}
1337
+ account_counters_bytes = {}
1338
+
1339
+ if did.did_type == DIDType.FILE:
1340
+ # NOTE: silently ignore rule.grouping
1341
+ if True: # instead of -> if rule.grouping == RuleGrouping.NONE:
1342
+ locks = {} # {(scope,name): [SQLAlchemy]}
1343
+ replicas = {} # {(scope, name): [SQLAlchemy]}
1344
+ source_replicas = {} # {(scope, name): [rse_id]
1345
+ # get files and replicas, lock the replicas
1346
+ replicas[(did.scope, did.name)] = rucio.core.replica.get_and_lock_file_replicas(scope=did.scope, name=did.name, nowait=True, restrict_rses=rses,
1347
+ session=session)
1348
+ # prnt(did, 'file')
1349
+ # prnt(replicas, 'replicas')
1350
+
1351
+ # get and lock the locks
1352
+ locks[(did.scope, did.name)] = rucio.core.lock.get_replica_locks(scope=did.scope, name=did.name, nowait=True, restrict_rses=rses,
1353
+ session=session)
1354
+ # prnt(locks, 'locks')
1355
+
1356
+ # if needed get source replicas
1357
+ if source_rses:
1358
+ source_replicas[(did.scope, did.name)] = rucio.core.replica.get_source_replicas(scope=did.scope, name=did.name, source_rses=source_rses,
1359
+ session=session)
1360
+ else:
1361
+ source_replicas = {}
1362
+ # prnt(source_replicas, 'source_replicas')
1363
+
1364
+ # to align code with cases below, create file dict
1365
+ file = {'name': did.name, 'scope': did.scope,
1366
+ 'bytes': did.bytes, 'md5': did.md5, 'adler32': did.adler32}
1367
+
1368
+ # calculate target RSEs
1369
+ rse_coverage = {replica.rse_id: file['bytes'] for replica in replicas[(file['scope'], file['name'])]}
1370
+ # prnt(rse_coverage)
1371
+ preferred_rse_ids = rse_coverage.keys()
1372
+ # prnt(preferred_rse_ids)
1373
+ rse_tuples = rseselector.select_rse(size=file['bytes'], preferred_rse_ids=preferred_rse_ids,
1374
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1375
+ # prnt(rse_tuples)
1376
+
1377
+ # initialize accumulators for __create_lock_and_replica calls
1378
+ locks_to_create = {} # {'rse_id': [locks]}
1379
+ replicas_to_create = {} # {'rse_id': [replicas]}
1380
+ transfers_to_create = [] # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
1381
+
1382
+ for rse_id, staging_area, availability_write in rse_tuples:
1383
+ # check for bug ????
1384
+ if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_id]) == 1:
1385
+ logger(logging.DEBUG, '>>> WARNING unexpected duplicate lock for file %s at RSE %s' % (file, rse_id))
1386
+ continue
1387
+ # proceed
1388
+ __create_lock_and_replica(file=file, dataset={'scope': None, 'name': None}, rule=rule,
1389
+ rse_id=rse_id, staging_area=staging_area, availability_write=availability_write, source_rses=source_rses,
1390
+ replicas=replicas, locks=locks, source_replicas=source_replicas,
1391
+ locks_to_create=locks_to_create, replicas_to_create=replicas_to_create, transfers_to_create=transfers_to_create,
1392
+ session=session)
1393
+
1394
+ # prnt(locks_to_create, 'locks_to_create')
1395
+ # prnt(replicas_to_create, 'replicas_to_create')
1396
+ # prnt(transfers_to_create, 'transfers_to_create')
1397
+
1398
+ # flush to DB
1399
+ session.add_all([item for sublist in replicas_to_create.values() for item in sublist])
1400
+ session.add_all([item for sublist in locks_to_create.values() for item in sublist])
1401
+ request_core.queue_requests(requests=transfers_to_create, session=session)
1402
+ session.flush()
1403
+
1404
+ # increment counters
1405
+ # align code with the one used inside the file loop below
1406
+ for rse_id in replicas_to_create.keys():
1407
+ rse_counters_files[rse_id] = len(replicas_to_create[rse_id]) + rse_counters_files.get(rse_id, 0)
1408
+ rse_counters_bytes[rse_id] = sum([replica.bytes for replica in replicas_to_create[rse_id]]) + rse_counters_bytes.get(rse_id, 0)
1409
+ # prnt(rse_counters_files, 'rse_counters_files')
1410
+ # prnt(rse_counters_bytes, 'rse_counters_bytes')
1411
+
1412
+ for rse_id in locks_to_create.keys():
1413
+ account_counters_files[rse_id] = len(locks_to_create[rse_id]) + account_counters_files.get(rse_id, 0)
1414
+ account_counters_bytes[rse_id] = sum([lock.bytes for lock in locks_to_create[rse_id]]) + account_counters_bytes.get(rse_id, 0)
1415
+ # prnt(account_counters_files, 'account_counters_files')
1416
+ # prnt(account_counters_bytes, 'account_counters_bytes')
1417
+
1418
+ else:
1419
+ # handle dataset case by converting it to singleton container case
1420
+ # NOTE: this will handle DATASET/ALL as if it was DATASET/DATASET
1421
+ datasets = [] # [(scope,name)]
1422
+ if did.did_type == DIDType.DATASET:
1423
+ datasets.append((did.scope, did.name, ))
1424
+ elif did.did_type == DIDType.CONTAINER:
1425
+ for child_dataset in rucio.core.did.list_child_datasets(scope=did.scope, name=did.name, session=session):
1426
+ # ensure there are no duplicates
1427
+ newds = (child_dataset['scope'], child_dataset['name'], )
1428
+ if newds not in datasets:
1429
+ datasets.append(newds)
1430
+ # sort alphabetically for deterministic order
1431
+ try:
1432
+ datasets = sorted(datasets)
1433
+ except Exception:
1434
+ pass
1435
+
1436
+ # prnt(datasets)
1437
+
1438
+ rse_coverage = {} # rse_coverage = { rse_id : bytes }
1439
+ rse_tuples = [] # rse_tuples = [(rse_id, staging_area, availability_write)]
1440
+ used_rse_ids = [] # for NONE grouping keep track of actual used RSEs
1441
+
1442
+ if rule.grouping == RuleGrouping.ALL:
1443
+ # calculate target RSEs
1444
+ nbytes = 0
1445
+ rse_coverage = {}
1446
+ # simply loop over child datasets
1447
+ # this is an approximation because ignoring the possibility of file overlap
1448
+ for ds_scope, ds_name in datasets:
1449
+ ds = rucio.core.did.get_did(scope=ds_scope, name=ds_name, dynamic_depth=DIDType.FILE, session=session) # this will be retrieved again later on -> could be optimized
1450
+ nbytes += ds['bytes']
1451
+ one_rse_coverage = rucio.core.replica.get_rse_coverage_of_dataset(scope=ds_scope, name=ds_name, session=session)
1452
+ for rse_id, bytes_ in one_rse_coverage.items():
1453
+ rse_coverage[rse_id] = bytes_ + rse_coverage.get(rse_id, 0)
1454
+
1455
+ # prnt(rse_coverage)
1456
+ preferred_rse_ids = [x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)]
1457
+ # prnt(preferred_rse_ids)
1458
+ rse_tuples = rseselector.select_rse(size=nbytes, preferred_rse_ids=preferred_rse_ids,
1459
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1460
+ # prnt(rse_tuples)
1461
+
1462
+ for ds_scope, ds_name in datasets:
1463
+ # prnt(('processing dataset ',ds_scope, ds_name))
1464
+ #
1465
+ ds = rucio.core.did.get_did(scope=ds_scope, name=ds_name, dynamic_depth=DIDType.FILE, session=session)
1466
+ ds_length = ds['length']
1467
+ ds_bytes = ds['bytes']
1468
+ ds_open = ds['open']
1469
+ # prnt(ds)
1470
+
1471
+ # calculate number of partitions based on nr of files
1472
+ npartitions = int(ds_length / max_partition_size) + 1
1473
+ # prnt(npartitions)
1474
+
1475
+ if rule.grouping == RuleGrouping.DATASET:
1476
+ # calculate target RSEs
1477
+ rse_coverage = rucio.core.replica.get_rse_coverage_of_dataset(scope=ds_scope, name=ds_name, session=session)
1478
+ # prnt(rse_coverage)
1479
+ preferred_rse_ids = [x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)]
1480
+ # prnt(preferred_rse_ids)
1481
+ rse_tuples = rseselector.select_rse(size=ds_bytes, preferred_rse_ids=preferred_rse_ids,
1482
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1483
+ # prnt(rse_tuples)
1484
+
1485
+ # loop over the partitions even if it is just one
1486
+ for p in range(npartitions):
1487
+ # prnt(('processing partition ', p, npartitions))
1488
+
1489
+ # files is [{'scope':, 'name':, 'bytes':, 'md5':, 'adler32':}]
1490
+ # locks is {(scope,name): [SQLAlchemy]}
1491
+ # replicas = {(scope, name): [SQLAlchemy]}
1492
+ # source replicas is {(scope, name): [SQLAlchemy]}
1493
+
1494
+ # get files and replicas, lock the replicas
1495
+ files, replicas = rucio.core.replica.get_and_lock_file_replicas_for_dataset(scope=ds_scope, name=ds_name, nowait=True, restrict_rses=rses,
1496
+ total_threads=npartitions, thread_id=p, session=session)
1497
+ # prnt(files, 'files')
1498
+ # prnt(replicas, 'replicas')
1499
+
1500
+ # get and lock the replica locks
1501
+ locks = rucio.core.lock.get_files_and_replica_locks_of_dataset(scope=ds_scope, name=ds_name, nowait=True, restrict_rses=rses,
1502
+ total_threads=npartitions, thread_id=p, session=session)
1503
+ # prnt(locks, 'locks')
1504
+
1505
+ # if needed get source replicas
1506
+ if source_rses:
1507
+ source_replicas = rucio.core.replica.get_source_replicas_for_dataset(scope=ds_scope, name=ds_name, source_rses=source_rses,
1508
+ total_threads=npartitions, thread_id=p, session=session)
1509
+ else:
1510
+ source_replicas = {}
1511
+ # prnt(source_replicas, 'source_replicas')
1512
+
1513
+ # initialize accumulators for __create_lock_and_replica calls
1514
+ locks_to_create = {} # {'rse_id': [locks]}
1515
+ replicas_to_create = {} # {'rse_id': [replicas]}
1516
+ transfers_to_create = [] # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
1517
+
1518
+ # loop over the rse tuples
1519
+ for file in files:
1520
+ # check for duplicate due to dataset overlap within container
1521
+ if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
1522
+ logger(logging.DEBUG, '>>> WARNING skipping (shared?) file %s' % file)
1523
+ continue
1524
+
1525
+ if rule.grouping == RuleGrouping.NONE:
1526
+ # calculate target RSEs
1527
+ rse_coverage = {replica.rse_id: file['bytes'] for replica in replicas[(file['scope'], file['name'])]}
1528
+ # prnt(rse_coverage)
1529
+ preferred_rse_ids = rse_coverage.keys()
1530
+ # prnt(preferred_rse_ids)
1531
+ rse_tuples = rseselector.select_rse(size=file['bytes'], preferred_rse_ids=preferred_rse_ids,
1532
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1533
+ # prnt(rse_tuples)
1534
+ # keep track of used RSEs
1535
+ for rt in rse_tuples:
1536
+ if rt[0] not in used_rse_ids:
1537
+ used_rse_ids.append(rt[0])
1538
+
1539
+ for rse_id, staging_area, availability_write in rse_tuples:
1540
+ # check for bug ????
1541
+ if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_id]) == 1:
1542
+ logger(logging.DEBUG, '>>> WARNING unexpected duplicate lock for file %s at RSE %s' % (file, rse_id))
1543
+ continue
1544
+ # proceed
1545
+ __create_lock_and_replica(file=file, dataset={'scope': ds_scope, 'name': ds_name}, rule=rule,
1546
+ rse_id=rse_id, staging_area=staging_area, availability_write=availability_write, source_rses=source_rses,
1547
+ replicas=replicas, locks=locks, source_replicas=source_replicas,
1548
+ locks_to_create=locks_to_create, replicas_to_create=replicas_to_create, transfers_to_create=transfers_to_create,
1549
+ session=session)
1550
+
1551
+ # prnt(locks_to_create, 'locks_to_create')
1552
+ # prnt(replicas_to_create, 'replicas_to_create')
1553
+ # prnt(transfers_to_create, 'transfers_to_create')
1554
+
1555
+ # flush to DB
1556
+ session.add_all([item for sublist in replicas_to_create.values() for item in sublist])
1557
+ session.add_all([item for sublist in locks_to_create.values() for item in sublist])
1558
+ request_core.queue_requests(requests=transfers_to_create, session=session)
1559
+ session.flush()
1560
+
1561
+ # increment counters
1562
+ # do not update (and lock !) counters inside loop here, update at very end and only once
1563
+ for rse_id in replicas_to_create.keys():
1564
+ rse_counters_files[rse_id] = len(replicas_to_create[rse_id]) + rse_counters_files.get(rse_id, 0)
1565
+ rse_counters_bytes[rse_id] = sum([replica.bytes for replica in replicas_to_create[rse_id]]) + rse_counters_bytes.get(rse_id, 0)
1566
+ # prnt(rse_counters_files, 'rse_counters_files')
1567
+ # prnt(rse_counters_bytes, 'rse_counters_bytes')
1568
+
1569
+ for rse_id in locks_to_create.keys():
1570
+ account_counters_files[rse_id] = len(locks_to_create[rse_id]) + account_counters_files.get(rse_id, 0)
1571
+ account_counters_bytes[rse_id] = sum([lock.bytes for lock in locks_to_create[rse_id]]) + account_counters_bytes.get(rse_id, 0)
1572
+ # prnt(account_counters_files, 'account_counters_files')
1573
+ # prnt(account_counters_bytes, 'account_counters_bytes')
1574
+
1575
+ # mem()
1576
+
1577
+ # dataset lock/replica
1578
+ u_rses = (used_rse_ids if rule.grouping == RuleGrouping.NONE else [x[0] for x in rse_tuples])
1579
+ # prnt(u_rses, 'used RSE ids')
1580
+ for u_rse in u_rses:
1581
+ # prnt('creating dataset lock/replica for %s on %s' % (ds_name,u_rse))
1582
+ if rule.grouping == RuleGrouping.DATASET or rule.grouping == RuleGrouping.ALL:
1583
+ # add dataset lock
1584
+ models.DatasetLock(scope=ds_scope, name=ds_name,
1585
+ rule_id=rule.id,
1586
+ rse_id=u_rse,
1587
+ state=LockState.REPLICATING,
1588
+ account=rule.account,
1589
+ length=ds_length if not ds_open else None,
1590
+ bytes=ds_bytes if not ds_open else None
1591
+ ).save(session=session)
1592
+
1593
+ # add dataset replica if not already existing (rule_id is not in PK)
1594
+ try:
1595
+ stmt = select(
1596
+ models.CollectionReplica
1597
+ ).where(
1598
+ and_(models.CollectionReplica.scope == ds_scope,
1599
+ models.CollectionReplica.name == ds_name,
1600
+ models.CollectionReplica.rse_id == u_rse)
1601
+ )
1602
+ session.execute(stmt).one()
1603
+ except NoResultFound:
1604
+ models.CollectionReplica(scope=ds_scope, name=ds_name, did_type=DIDType.DATASET,
1605
+ rse_id=u_rse,
1606
+ bytes=0, length=0, available_bytes=0, available_replicas_cnt=0,
1607
+ state=ReplicaState.UNAVAILABLE
1608
+ ).save(session=session)
1609
+
1610
+ models.UpdatedCollectionReplica(scope=ds_scope, name=ds_name, did_type=DIDType.DATASET
1611
+ ).save(session=session)
1612
+
1613
+ # update account and rse counters
1614
+ for rse_id in rse_counters_files:
1615
+ rse_counter.increase(rse_id=rse_id, files=rse_counters_files[rse_id], bytes_=rse_counters_bytes[rse_id], session=session)
1616
+ for rse_id in account_counters_files:
1617
+ account_counter.increase(rse_id=rse_id, account=rule.account, files=account_counters_files[rse_id], bytes_=account_counters_bytes[rse_id], session=session)
1618
+ session.flush()
1619
+
1620
+ return