rucio 35.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rucio might be problematic; see the registry's advisory page for more details.

Files changed (493)
  1. rucio/__init__.py +17 -0
  2. rucio/alembicrevision.py +15 -0
  3. rucio/client/__init__.py +15 -0
  4. rucio/client/accountclient.py +433 -0
  5. rucio/client/accountlimitclient.py +183 -0
  6. rucio/client/baseclient.py +974 -0
  7. rucio/client/client.py +76 -0
  8. rucio/client/configclient.py +126 -0
  9. rucio/client/credentialclient.py +59 -0
  10. rucio/client/didclient.py +866 -0
  11. rucio/client/diracclient.py +56 -0
  12. rucio/client/downloadclient.py +1785 -0
  13. rucio/client/exportclient.py +44 -0
  14. rucio/client/fileclient.py +50 -0
  15. rucio/client/importclient.py +42 -0
  16. rucio/client/lifetimeclient.py +90 -0
  17. rucio/client/lockclient.py +109 -0
  18. rucio/client/metaconventionsclient.py +140 -0
  19. rucio/client/pingclient.py +44 -0
  20. rucio/client/replicaclient.py +454 -0
  21. rucio/client/requestclient.py +125 -0
  22. rucio/client/rseclient.py +746 -0
  23. rucio/client/ruleclient.py +294 -0
  24. rucio/client/scopeclient.py +90 -0
  25. rucio/client/subscriptionclient.py +173 -0
  26. rucio/client/touchclient.py +82 -0
  27. rucio/client/uploadclient.py +955 -0
  28. rucio/common/__init__.py +13 -0
  29. rucio/common/cache.py +74 -0
  30. rucio/common/config.py +801 -0
  31. rucio/common/constants.py +159 -0
  32. rucio/common/constraints.py +17 -0
  33. rucio/common/didtype.py +189 -0
  34. rucio/common/dumper/__init__.py +335 -0
  35. rucio/common/dumper/consistency.py +452 -0
  36. rucio/common/dumper/data_models.py +318 -0
  37. rucio/common/dumper/path_parsing.py +64 -0
  38. rucio/common/exception.py +1151 -0
  39. rucio/common/extra.py +36 -0
  40. rucio/common/logging.py +420 -0
  41. rucio/common/pcache.py +1408 -0
  42. rucio/common/plugins.py +153 -0
  43. rucio/common/policy.py +84 -0
  44. rucio/common/schema/__init__.py +150 -0
  45. rucio/common/schema/atlas.py +413 -0
  46. rucio/common/schema/belleii.py +408 -0
  47. rucio/common/schema/domatpc.py +401 -0
  48. rucio/common/schema/escape.py +426 -0
  49. rucio/common/schema/generic.py +433 -0
  50. rucio/common/schema/generic_multi_vo.py +412 -0
  51. rucio/common/schema/icecube.py +406 -0
  52. rucio/common/stomp_utils.py +159 -0
  53. rucio/common/stopwatch.py +55 -0
  54. rucio/common/test_rucio_server.py +148 -0
  55. rucio/common/types.py +403 -0
  56. rucio/common/utils.py +2238 -0
  57. rucio/core/__init__.py +13 -0
  58. rucio/core/account.py +496 -0
  59. rucio/core/account_counter.py +236 -0
  60. rucio/core/account_limit.py +423 -0
  61. rucio/core/authentication.py +620 -0
  62. rucio/core/config.py +456 -0
  63. rucio/core/credential.py +225 -0
  64. rucio/core/did.py +3000 -0
  65. rucio/core/did_meta_plugins/__init__.py +252 -0
  66. rucio/core/did_meta_plugins/did_column_meta.py +331 -0
  67. rucio/core/did_meta_plugins/did_meta_plugin_interface.py +165 -0
  68. rucio/core/did_meta_plugins/filter_engine.py +613 -0
  69. rucio/core/did_meta_plugins/json_meta.py +240 -0
  70. rucio/core/did_meta_plugins/mongo_meta.py +216 -0
  71. rucio/core/did_meta_plugins/postgres_meta.py +316 -0
  72. rucio/core/dirac.py +237 -0
  73. rucio/core/distance.py +187 -0
  74. rucio/core/exporter.py +59 -0
  75. rucio/core/heartbeat.py +363 -0
  76. rucio/core/identity.py +300 -0
  77. rucio/core/importer.py +259 -0
  78. rucio/core/lifetime_exception.py +377 -0
  79. rucio/core/lock.py +576 -0
  80. rucio/core/message.py +282 -0
  81. rucio/core/meta_conventions.py +203 -0
  82. rucio/core/monitor.py +447 -0
  83. rucio/core/naming_convention.py +195 -0
  84. rucio/core/nongrid_trace.py +136 -0
  85. rucio/core/oidc.py +1461 -0
  86. rucio/core/permission/__init__.py +119 -0
  87. rucio/core/permission/atlas.py +1348 -0
  88. rucio/core/permission/belleii.py +1077 -0
  89. rucio/core/permission/escape.py +1078 -0
  90. rucio/core/permission/generic.py +1130 -0
  91. rucio/core/permission/generic_multi_vo.py +1150 -0
  92. rucio/core/quarantined_replica.py +223 -0
  93. rucio/core/replica.py +4158 -0
  94. rucio/core/replica_sorter.py +366 -0
  95. rucio/core/request.py +3089 -0
  96. rucio/core/rse.py +1875 -0
  97. rucio/core/rse_counter.py +186 -0
  98. rucio/core/rse_expression_parser.py +459 -0
  99. rucio/core/rse_selector.py +302 -0
  100. rucio/core/rule.py +4483 -0
  101. rucio/core/rule_grouping.py +1618 -0
  102. rucio/core/scope.py +180 -0
  103. rucio/core/subscription.py +364 -0
  104. rucio/core/topology.py +490 -0
  105. rucio/core/trace.py +375 -0
  106. rucio/core/transfer.py +1517 -0
  107. rucio/core/vo.py +169 -0
  108. rucio/core/volatile_replica.py +150 -0
  109. rucio/daemons/__init__.py +13 -0
  110. rucio/daemons/abacus/__init__.py +13 -0
  111. rucio/daemons/abacus/account.py +116 -0
  112. rucio/daemons/abacus/collection_replica.py +124 -0
  113. rucio/daemons/abacus/rse.py +117 -0
  114. rucio/daemons/atropos/__init__.py +13 -0
  115. rucio/daemons/atropos/atropos.py +242 -0
  116. rucio/daemons/auditor/__init__.py +289 -0
  117. rucio/daemons/auditor/hdfs.py +97 -0
  118. rucio/daemons/auditor/srmdumps.py +355 -0
  119. rucio/daemons/automatix/__init__.py +13 -0
  120. rucio/daemons/automatix/automatix.py +293 -0
  121. rucio/daemons/badreplicas/__init__.py +13 -0
  122. rucio/daemons/badreplicas/minos.py +322 -0
  123. rucio/daemons/badreplicas/minos_temporary_expiration.py +171 -0
  124. rucio/daemons/badreplicas/necromancer.py +196 -0
  125. rucio/daemons/bb8/__init__.py +13 -0
  126. rucio/daemons/bb8/bb8.py +353 -0
  127. rucio/daemons/bb8/common.py +759 -0
  128. rucio/daemons/bb8/nuclei_background_rebalance.py +153 -0
  129. rucio/daemons/bb8/t2_background_rebalance.py +153 -0
  130. rucio/daemons/c3po/__init__.py +13 -0
  131. rucio/daemons/c3po/algorithms/__init__.py +13 -0
  132. rucio/daemons/c3po/algorithms/simple.py +134 -0
  133. rucio/daemons/c3po/algorithms/t2_free_space.py +128 -0
  134. rucio/daemons/c3po/algorithms/t2_free_space_only_pop.py +130 -0
  135. rucio/daemons/c3po/algorithms/t2_free_space_only_pop_with_network.py +294 -0
  136. rucio/daemons/c3po/c3po.py +371 -0
  137. rucio/daemons/c3po/collectors/__init__.py +13 -0
  138. rucio/daemons/c3po/collectors/agis.py +108 -0
  139. rucio/daemons/c3po/collectors/free_space.py +81 -0
  140. rucio/daemons/c3po/collectors/jedi_did.py +57 -0
  141. rucio/daemons/c3po/collectors/mock_did.py +51 -0
  142. rucio/daemons/c3po/collectors/network_metrics.py +71 -0
  143. rucio/daemons/c3po/collectors/workload.py +112 -0
  144. rucio/daemons/c3po/utils/__init__.py +13 -0
  145. rucio/daemons/c3po/utils/dataset_cache.py +50 -0
  146. rucio/daemons/c3po/utils/expiring_dataset_cache.py +56 -0
  147. rucio/daemons/c3po/utils/expiring_list.py +62 -0
  148. rucio/daemons/c3po/utils/popularity.py +85 -0
  149. rucio/daemons/c3po/utils/timeseries.py +89 -0
  150. rucio/daemons/cache/__init__.py +13 -0
  151. rucio/daemons/cache/consumer.py +197 -0
  152. rucio/daemons/common.py +415 -0
  153. rucio/daemons/conveyor/__init__.py +13 -0
  154. rucio/daemons/conveyor/common.py +562 -0
  155. rucio/daemons/conveyor/finisher.py +529 -0
  156. rucio/daemons/conveyor/poller.py +404 -0
  157. rucio/daemons/conveyor/preparer.py +205 -0
  158. rucio/daemons/conveyor/receiver.py +249 -0
  159. rucio/daemons/conveyor/stager.py +132 -0
  160. rucio/daemons/conveyor/submitter.py +403 -0
  161. rucio/daemons/conveyor/throttler.py +532 -0
  162. rucio/daemons/follower/__init__.py +13 -0
  163. rucio/daemons/follower/follower.py +101 -0
  164. rucio/daemons/hermes/__init__.py +13 -0
  165. rucio/daemons/hermes/hermes.py +774 -0
  166. rucio/daemons/judge/__init__.py +13 -0
  167. rucio/daemons/judge/cleaner.py +159 -0
  168. rucio/daemons/judge/evaluator.py +185 -0
  169. rucio/daemons/judge/injector.py +162 -0
  170. rucio/daemons/judge/repairer.py +154 -0
  171. rucio/daemons/oauthmanager/__init__.py +13 -0
  172. rucio/daemons/oauthmanager/oauthmanager.py +198 -0
  173. rucio/daemons/reaper/__init__.py +13 -0
  174. rucio/daemons/reaper/dark_reaper.py +278 -0
  175. rucio/daemons/reaper/reaper.py +743 -0
  176. rucio/daemons/replicarecoverer/__init__.py +13 -0
  177. rucio/daemons/replicarecoverer/suspicious_replica_recoverer.py +626 -0
  178. rucio/daemons/rsedecommissioner/__init__.py +13 -0
  179. rucio/daemons/rsedecommissioner/config.py +81 -0
  180. rucio/daemons/rsedecommissioner/profiles/__init__.py +24 -0
  181. rucio/daemons/rsedecommissioner/profiles/atlas.py +60 -0
  182. rucio/daemons/rsedecommissioner/profiles/generic.py +451 -0
  183. rucio/daemons/rsedecommissioner/profiles/types.py +92 -0
  184. rucio/daemons/rsedecommissioner/rse_decommissioner.py +280 -0
  185. rucio/daemons/storage/__init__.py +13 -0
  186. rucio/daemons/storage/consistency/__init__.py +13 -0
  187. rucio/daemons/storage/consistency/actions.py +846 -0
  188. rucio/daemons/tracer/__init__.py +13 -0
  189. rucio/daemons/tracer/kronos.py +536 -0
  190. rucio/daemons/transmogrifier/__init__.py +13 -0
  191. rucio/daemons/transmogrifier/transmogrifier.py +762 -0
  192. rucio/daemons/undertaker/__init__.py +13 -0
  193. rucio/daemons/undertaker/undertaker.py +137 -0
  194. rucio/db/__init__.py +13 -0
  195. rucio/db/sqla/__init__.py +52 -0
  196. rucio/db/sqla/constants.py +201 -0
  197. rucio/db/sqla/migrate_repo/__init__.py +13 -0
  198. rucio/db/sqla/migrate_repo/env.py +110 -0
  199. rucio/db/sqla/migrate_repo/versions/01eaf73ab656_add_new_rule_notification_state_progress.py +70 -0
  200. rucio/db/sqla/migrate_repo/versions/0437a40dbfd1_add_eol_at_in_rules.py +47 -0
  201. rucio/db/sqla/migrate_repo/versions/0f1adb7a599a_create_transfer_hops_table.py +59 -0
  202. rucio/db/sqla/migrate_repo/versions/102efcf145f4_added_stuck_at_column_to_rules.py +43 -0
  203. rucio/db/sqla/migrate_repo/versions/13d4f70c66a9_introduce_transfer_limits.py +91 -0
  204. rucio/db/sqla/migrate_repo/versions/140fef722e91_cleanup_distances_table.py +76 -0
  205. rucio/db/sqla/migrate_repo/versions/14ec5aeb64cf_add_request_external_host.py +43 -0
  206. rucio/db/sqla/migrate_repo/versions/156fb5b5a14_add_request_type_to_requests_idx.py +50 -0
  207. rucio/db/sqla/migrate_repo/versions/1677d4d803c8_split_rse_availability_into_multiple.py +68 -0
  208. rucio/db/sqla/migrate_repo/versions/16a0aca82e12_create_index_on_table_replicas_path.py +40 -0
  209. rucio/db/sqla/migrate_repo/versions/1803333ac20f_adding_provenance_and_phys_group.py +45 -0
  210. rucio/db/sqla/migrate_repo/versions/1a29d6a9504c_add_didtype_chck_to_requests.py +60 -0
  211. rucio/db/sqla/migrate_repo/versions/1a80adff031a_create_index_on_rules_hist_recent.py +40 -0
  212. rucio/db/sqla/migrate_repo/versions/1c45d9730ca6_increase_identity_length.py +140 -0
  213. rucio/db/sqla/migrate_repo/versions/1d1215494e95_add_quarantined_replicas_table.py +73 -0
  214. rucio/db/sqla/migrate_repo/versions/1d96f484df21_asynchronous_rules_and_rule_approval.py +74 -0
  215. rucio/db/sqla/migrate_repo/versions/1f46c5f240ac_add_bytes_column_to_bad_replicas.py +43 -0
  216. rucio/db/sqla/migrate_repo/versions/1fc15ab60d43_add_message_history_table.py +50 -0
  217. rucio/db/sqla/migrate_repo/versions/2190e703eb6e_move_rse_settings_to_rse_attributes.py +134 -0
  218. rucio/db/sqla/migrate_repo/versions/21d6b9dc9961_add_mismatch_scheme_state_to_requests.py +64 -0
  219. rucio/db/sqla/migrate_repo/versions/22cf51430c78_add_availability_column_to_table_rses.py +39 -0
  220. rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py +64 -0
  221. rucio/db/sqla/migrate_repo/versions/25821a8a45a3_remove_unique_constraint_on_requests.py +51 -0
  222. rucio/db/sqla/migrate_repo/versions/25fc855625cf_added_unique_constraint_to_rules.py +41 -0
  223. rucio/db/sqla/migrate_repo/versions/269fee20dee9_add_repair_cnt_to_locks.py +43 -0
  224. rucio/db/sqla/migrate_repo/versions/271a46ea6244_add_ignore_availability_column_to_rules.py +44 -0
  225. rucio/db/sqla/migrate_repo/versions/277b5fbb41d3_switch_heartbeats_executable.py +53 -0
  226. rucio/db/sqla/migrate_repo/versions/27e3a68927fb_remove_replicas_tombstone_and_replicas_.py +38 -0
  227. rucio/db/sqla/migrate_repo/versions/2854cd9e168_added_rule_id_column.py +47 -0
  228. rucio/db/sqla/migrate_repo/versions/295289b5a800_processed_by_and__at_in_requests.py +45 -0
  229. rucio/db/sqla/migrate_repo/versions/2962ece31cf4_add_nbaccesses_column_in_the_did_table.py +45 -0
  230. rucio/db/sqla/migrate_repo/versions/2af3291ec4c_added_replicas_history_table.py +57 -0
  231. rucio/db/sqla/migrate_repo/versions/2b69addda658_add_columns_for_third_party_copy_read_.py +45 -0
  232. rucio/db/sqla/migrate_repo/versions/2b8e7bcb4783_add_config_table.py +69 -0
  233. rucio/db/sqla/migrate_repo/versions/2ba5229cb54c_add_submitted_at_to_requests_table.py +43 -0
  234. rucio/db/sqla/migrate_repo/versions/2cbee484dcf9_added_column_volume_to_rse_transfer_.py +42 -0
  235. rucio/db/sqla/migrate_repo/versions/2edee4a83846_add_source_to_requests_and_requests_.py +47 -0
  236. rucio/db/sqla/migrate_repo/versions/2eef46be23d4_change_tokens_pk.py +46 -0
  237. rucio/db/sqla/migrate_repo/versions/2f648fc909f3_index_in_rule_history_on_scope_name.py +40 -0
  238. rucio/db/sqla/migrate_repo/versions/3082b8cef557_add_naming_convention_table_and_closed_.py +67 -0
  239. rucio/db/sqla/migrate_repo/versions/30fa38b6434e_add_index_on_service_column_in_the_message_table.py +44 -0
  240. rucio/db/sqla/migrate_repo/versions/3152492b110b_added_staging_area_column.py +77 -0
  241. rucio/db/sqla/migrate_repo/versions/32c7d2783f7e_create_bad_replicas_table.py +60 -0
  242. rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py +72 -0
  243. rucio/db/sqla/migrate_repo/versions/35ef10d1e11b_change_index_on_table_requests.py +42 -0
  244. rucio/db/sqla/migrate_repo/versions/379a19b5332d_create_rse_limits_table.py +65 -0
  245. rucio/db/sqla/migrate_repo/versions/384b96aa0f60_created_rule_history_tables.py +133 -0
  246. rucio/db/sqla/migrate_repo/versions/3ac1660a1a72_extend_distance_table.py +55 -0
  247. rucio/db/sqla/migrate_repo/versions/3ad36e2268b0_create_collection_replicas_updates_table.py +76 -0
  248. rucio/db/sqla/migrate_repo/versions/3c9df354071b_extend_waiting_request_state.py +60 -0
  249. rucio/db/sqla/migrate_repo/versions/3d9813fab443_add_a_new_state_lost_in_badfilesstatus.py +44 -0
  250. rucio/db/sqla/migrate_repo/versions/40ad39ce3160_add_transferred_at_to_requests_table.py +43 -0
  251. rucio/db/sqla/migrate_repo/versions/4207be2fd914_add_notification_column_to_rules.py +64 -0
  252. rucio/db/sqla/migrate_repo/versions/42db2617c364_create_index_on_requests_external_id.py +40 -0
  253. rucio/db/sqla/migrate_repo/versions/436827b13f82_added_column_activity_to_table_requests.py +43 -0
  254. rucio/db/sqla/migrate_repo/versions/44278720f774_update_requests_typ_sta_upd_idx_index.py +44 -0
  255. rucio/db/sqla/migrate_repo/versions/45378a1e76a8_create_collection_replica_table.py +78 -0
  256. rucio/db/sqla/migrate_repo/versions/469d262be19_removing_created_at_index.py +41 -0
  257. rucio/db/sqla/migrate_repo/versions/4783c1f49cb4_create_distance_table.py +59 -0
  258. rucio/db/sqla/migrate_repo/versions/49a21b4d4357_create_index_on_table_tokens.py +44 -0
  259. rucio/db/sqla/migrate_repo/versions/4a2cbedda8b9_add_source_replica_expression_column_to_.py +43 -0
  260. rucio/db/sqla/migrate_repo/versions/4a7182d9578b_added_bytes_length_accessed_at_columns.py +49 -0
  261. rucio/db/sqla/migrate_repo/versions/4bab9edd01fc_create_index_on_requests_rule_id.py +40 -0
  262. rucio/db/sqla/migrate_repo/versions/4c3a4acfe006_new_attr_account_table.py +63 -0
  263. rucio/db/sqla/migrate_repo/versions/4cf0a2e127d4_adding_transient_metadata.py +43 -0
  264. rucio/db/sqla/migrate_repo/versions/4df2c5ddabc0_remove_temporary_dids.py +55 -0
  265. rucio/db/sqla/migrate_repo/versions/50280c53117c_add_qos_class_to_rse.py +45 -0
  266. rucio/db/sqla/migrate_repo/versions/52153819589c_add_rse_id_to_replicas_table.py +43 -0
  267. rucio/db/sqla/migrate_repo/versions/52fd9f4916fa_added_activity_to_rules.py +43 -0
  268. rucio/db/sqla/migrate_repo/versions/53b479c3cb0f_fix_did_meta_table_missing_updated_at_.py +45 -0
  269. rucio/db/sqla/migrate_repo/versions/5673b4b6e843_add_wfms_metadata_to_rule_tables.py +47 -0
  270. rucio/db/sqla/migrate_repo/versions/575767d9f89_added_source_history_table.py +58 -0
  271. rucio/db/sqla/migrate_repo/versions/58bff7008037_add_started_at_to_requests.py +45 -0
  272. rucio/db/sqla/migrate_repo/versions/58c8b78301ab_rename_callback_to_message.py +106 -0
  273. rucio/db/sqla/migrate_repo/versions/5f139f77382a_added_child_rule_id_column.py +55 -0
  274. rucio/db/sqla/migrate_repo/versions/688ef1840840_adding_did_meta_table.py +50 -0
  275. rucio/db/sqla/migrate_repo/versions/6e572a9bfbf3_add_new_split_container_column_to_rules.py +47 -0
  276. rucio/db/sqla/migrate_repo/versions/70587619328_add_comment_column_for_subscriptions.py +43 -0
  277. rucio/db/sqla/migrate_repo/versions/739064d31565_remove_history_table_pks.py +41 -0
  278. rucio/db/sqla/migrate_repo/versions/7541902bf173_add_didsfollowed_and_followevents_table.py +91 -0
  279. rucio/db/sqla/migrate_repo/versions/7ec22226cdbf_new_replica_state_for_temporary_.py +72 -0
  280. rucio/db/sqla/migrate_repo/versions/810a41685bc1_added_columns_rse_transfer_limits.py +49 -0
  281. rucio/db/sqla/migrate_repo/versions/83f991c63a93_correct_rse_expression_length.py +43 -0
  282. rucio/db/sqla/migrate_repo/versions/8523998e2e76_increase_size_of_extended_attributes_.py +43 -0
  283. rucio/db/sqla/migrate_repo/versions/8ea9122275b1_adding_missing_function_based_indices.py +53 -0
  284. rucio/db/sqla/migrate_repo/versions/90f47792bb76_add_clob_payload_to_messages.py +45 -0
  285. rucio/db/sqla/migrate_repo/versions/914b8f02df38_new_table_for_lifetime_model_exceptions.py +68 -0
  286. rucio/db/sqla/migrate_repo/versions/94a5961ddbf2_add_estimator_columns.py +45 -0
  287. rucio/db/sqla/migrate_repo/versions/9a1b149a2044_add_saml_identity_type.py +94 -0
  288. rucio/db/sqla/migrate_repo/versions/9a45bc4ea66d_add_vp_table.py +54 -0
  289. rucio/db/sqla/migrate_repo/versions/9eb936a81eb1_true_is_true.py +72 -0
  290. rucio/db/sqla/migrate_repo/versions/a08fa8de1545_transfer_stats_table.py +55 -0
  291. rucio/db/sqla/migrate_repo/versions/a118956323f8_added_vo_table_and_vo_col_to_rse.py +76 -0
  292. rucio/db/sqla/migrate_repo/versions/a193a275255c_add_status_column_in_messages.py +47 -0
  293. rucio/db/sqla/migrate_repo/versions/a5f6f6e928a7_1_7_0.py +121 -0
  294. rucio/db/sqla/migrate_repo/versions/a616581ee47_added_columns_to_table_requests.py +59 -0
  295. rucio/db/sqla/migrate_repo/versions/a6eb23955c28_state_idx_non_functional.py +52 -0
  296. rucio/db/sqla/migrate_repo/versions/a74275a1ad30_added_global_quota_table.py +54 -0
  297. rucio/db/sqla/migrate_repo/versions/a93e4e47bda_heartbeats.py +64 -0
  298. rucio/db/sqla/migrate_repo/versions/ae2a56fcc89_added_comment_column_to_rules.py +49 -0
  299. rucio/db/sqla/migrate_repo/versions/b0070f3695c8_add_deletedidmeta_table.py +57 -0
  300. rucio/db/sqla/migrate_repo/versions/b4293a99f344_added_column_identity_to_table_tokens.py +43 -0
  301. rucio/db/sqla/migrate_repo/versions/b5493606bbf5_fix_primary_key_for_subscription_history.py +41 -0
  302. rucio/db/sqla/migrate_repo/versions/b7d287de34fd_removal_of_replicastate_source.py +91 -0
  303. rucio/db/sqla/migrate_repo/versions/b818052fa670_add_index_to_quarantined_replicas.py +40 -0
  304. rucio/db/sqla/migrate_repo/versions/b8caac94d7f0_add_comments_column_for_subscriptions_.py +43 -0
  305. rucio/db/sqla/migrate_repo/versions/b96a1c7e1cc4_new_bad_pfns_table_and_bad_replicas_.py +143 -0
  306. rucio/db/sqla/migrate_repo/versions/bb695f45c04_extend_request_state.py +76 -0
  307. rucio/db/sqla/migrate_repo/versions/bc68e9946deb_add_staging_timestamps_to_request.py +50 -0
  308. rucio/db/sqla/migrate_repo/versions/bf3baa1c1474_correct_pk_and_idx_for_history_tables.py +72 -0
  309. rucio/db/sqla/migrate_repo/versions/c0937668555f_add_qos_policy_map_table.py +55 -0
  310. rucio/db/sqla/migrate_repo/versions/c129ccdb2d5_add_lumiblocknr_to_dids.py +43 -0
  311. rucio/db/sqla/migrate_repo/versions/ccdbcd48206e_add_did_type_column_index_on_did_meta_.py +65 -0
  312. rucio/db/sqla/migrate_repo/versions/cebad904c4dd_new_payload_column_for_heartbeats.py +47 -0
  313. rucio/db/sqla/migrate_repo/versions/d1189a09c6e0_oauth2_0_and_jwt_feature_support_adding_.py +146 -0
  314. rucio/db/sqla/migrate_repo/versions/d23453595260_extend_request_state_for_preparer.py +104 -0
  315. rucio/db/sqla/migrate_repo/versions/d6dceb1de2d_added_purge_column_to_rules.py +44 -0
  316. rucio/db/sqla/migrate_repo/versions/d6e2c3b2cf26_remove_third_party_copy_column_from_rse.py +43 -0
  317. rucio/db/sqla/migrate_repo/versions/d91002c5841_new_account_limits_table.py +103 -0
  318. rucio/db/sqla/migrate_repo/versions/e138c364ebd0_extending_columns_for_filter_and_.py +49 -0
  319. rucio/db/sqla/migrate_repo/versions/e59300c8b179_support_for_archive.py +104 -0
  320. rucio/db/sqla/migrate_repo/versions/f1b14a8c2ac1_postgres_use_check_constraints.py +29 -0
  321. rucio/db/sqla/migrate_repo/versions/f41ffe206f37_oracle_global_temporary_tables.py +74 -0
  322. rucio/db/sqla/migrate_repo/versions/f85a2962b021_adding_transfertool_column_to_requests_.py +47 -0
  323. rucio/db/sqla/migrate_repo/versions/fa7a7d78b602_increase_refresh_token_size.py +43 -0
  324. rucio/db/sqla/migrate_repo/versions/fb28a95fe288_add_replicas_rse_id_tombstone_idx.py +37 -0
  325. rucio/db/sqla/migrate_repo/versions/fe1a65b176c9_set_third_party_copy_read_and_write_.py +43 -0
  326. rucio/db/sqla/migrate_repo/versions/fe8ea2fa9788_added_third_party_copy_column_to_rse_.py +43 -0
  327. rucio/db/sqla/models.py +1740 -0
  328. rucio/db/sqla/sautils.py +55 -0
  329. rucio/db/sqla/session.py +498 -0
  330. rucio/db/sqla/types.py +206 -0
  331. rucio/db/sqla/util.py +543 -0
  332. rucio/gateway/__init__.py +13 -0
  333. rucio/gateway/account.py +339 -0
  334. rucio/gateway/account_limit.py +286 -0
  335. rucio/gateway/authentication.py +375 -0
  336. rucio/gateway/config.py +217 -0
  337. rucio/gateway/credential.py +71 -0
  338. rucio/gateway/did.py +970 -0
  339. rucio/gateway/dirac.py +81 -0
  340. rucio/gateway/exporter.py +59 -0
  341. rucio/gateway/heartbeat.py +74 -0
  342. rucio/gateway/identity.py +204 -0
  343. rucio/gateway/importer.py +45 -0
  344. rucio/gateway/lifetime_exception.py +120 -0
  345. rucio/gateway/lock.py +153 -0
  346. rucio/gateway/meta_conventions.py +87 -0
  347. rucio/gateway/permission.py +71 -0
  348. rucio/gateway/quarantined_replica.py +78 -0
  349. rucio/gateway/replica.py +529 -0
  350. rucio/gateway/request.py +321 -0
  351. rucio/gateway/rse.py +600 -0
  352. rucio/gateway/rule.py +417 -0
  353. rucio/gateway/scope.py +99 -0
  354. rucio/gateway/subscription.py +277 -0
  355. rucio/gateway/vo.py +122 -0
  356. rucio/rse/__init__.py +96 -0
  357. rucio/rse/protocols/__init__.py +13 -0
  358. rucio/rse/protocols/bittorrent.py +184 -0
  359. rucio/rse/protocols/cache.py +122 -0
  360. rucio/rse/protocols/dummy.py +111 -0
  361. rucio/rse/protocols/gfal.py +703 -0
  362. rucio/rse/protocols/globus.py +243 -0
  363. rucio/rse/protocols/gsiftp.py +92 -0
  364. rucio/rse/protocols/http_cache.py +82 -0
  365. rucio/rse/protocols/mock.py +123 -0
  366. rucio/rse/protocols/ngarc.py +209 -0
  367. rucio/rse/protocols/posix.py +250 -0
  368. rucio/rse/protocols/protocol.py +594 -0
  369. rucio/rse/protocols/rclone.py +364 -0
  370. rucio/rse/protocols/rfio.py +136 -0
  371. rucio/rse/protocols/srm.py +338 -0
  372. rucio/rse/protocols/ssh.py +413 -0
  373. rucio/rse/protocols/storm.py +206 -0
  374. rucio/rse/protocols/webdav.py +550 -0
  375. rucio/rse/protocols/xrootd.py +301 -0
  376. rucio/rse/rsemanager.py +764 -0
  377. rucio/tests/__init__.py +13 -0
  378. rucio/tests/common.py +270 -0
  379. rucio/tests/common_server.py +132 -0
  380. rucio/transfertool/__init__.py +13 -0
  381. rucio/transfertool/bittorrent.py +199 -0
  382. rucio/transfertool/bittorrent_driver.py +52 -0
  383. rucio/transfertool/bittorrent_driver_qbittorrent.py +133 -0
  384. rucio/transfertool/fts3.py +1596 -0
  385. rucio/transfertool/fts3_plugins.py +152 -0
  386. rucio/transfertool/globus.py +201 -0
  387. rucio/transfertool/globus_library.py +181 -0
  388. rucio/transfertool/mock.py +90 -0
  389. rucio/transfertool/transfertool.py +221 -0
  390. rucio/vcsversion.py +11 -0
  391. rucio/version.py +38 -0
  392. rucio/web/__init__.py +13 -0
  393. rucio/web/rest/__init__.py +13 -0
  394. rucio/web/rest/flaskapi/__init__.py +13 -0
  395. rucio/web/rest/flaskapi/authenticated_bp.py +27 -0
  396. rucio/web/rest/flaskapi/v1/__init__.py +13 -0
  397. rucio/web/rest/flaskapi/v1/accountlimits.py +236 -0
  398. rucio/web/rest/flaskapi/v1/accounts.py +1089 -0
  399. rucio/web/rest/flaskapi/v1/archives.py +102 -0
  400. rucio/web/rest/flaskapi/v1/auth.py +1644 -0
  401. rucio/web/rest/flaskapi/v1/common.py +426 -0
  402. rucio/web/rest/flaskapi/v1/config.py +304 -0
  403. rucio/web/rest/flaskapi/v1/credentials.py +212 -0
  404. rucio/web/rest/flaskapi/v1/dids.py +2334 -0
  405. rucio/web/rest/flaskapi/v1/dirac.py +116 -0
  406. rucio/web/rest/flaskapi/v1/export.py +75 -0
  407. rucio/web/rest/flaskapi/v1/heartbeats.py +127 -0
  408. rucio/web/rest/flaskapi/v1/identities.py +261 -0
  409. rucio/web/rest/flaskapi/v1/import.py +132 -0
  410. rucio/web/rest/flaskapi/v1/lifetime_exceptions.py +312 -0
  411. rucio/web/rest/flaskapi/v1/locks.py +358 -0
  412. rucio/web/rest/flaskapi/v1/main.py +91 -0
  413. rucio/web/rest/flaskapi/v1/meta_conventions.py +241 -0
  414. rucio/web/rest/flaskapi/v1/metrics.py +36 -0
  415. rucio/web/rest/flaskapi/v1/nongrid_traces.py +97 -0
  416. rucio/web/rest/flaskapi/v1/ping.py +88 -0
  417. rucio/web/rest/flaskapi/v1/redirect.py +365 -0
  418. rucio/web/rest/flaskapi/v1/replicas.py +1890 -0
  419. rucio/web/rest/flaskapi/v1/requests.py +998 -0
  420. rucio/web/rest/flaskapi/v1/rses.py +2239 -0
  421. rucio/web/rest/flaskapi/v1/rules.py +854 -0
  422. rucio/web/rest/flaskapi/v1/scopes.py +159 -0
  423. rucio/web/rest/flaskapi/v1/subscriptions.py +650 -0
  424. rucio/web/rest/flaskapi/v1/templates/auth_crash.html +80 -0
  425. rucio/web/rest/flaskapi/v1/templates/auth_granted.html +82 -0
  426. rucio/web/rest/flaskapi/v1/traces.py +100 -0
  427. rucio/web/rest/flaskapi/v1/types.py +20 -0
  428. rucio/web/rest/flaskapi/v1/vos.py +278 -0
  429. rucio/web/rest/main.py +18 -0
  430. rucio/web/rest/metrics.py +27 -0
  431. rucio/web/rest/ping.py +27 -0
  432. rucio-35.7.0.data/data/rucio/etc/alembic.ini.template +71 -0
  433. rucio-35.7.0.data/data/rucio/etc/alembic_offline.ini.template +74 -0
  434. rucio-35.7.0.data/data/rucio/etc/globus-config.yml.template +5 -0
  435. rucio-35.7.0.data/data/rucio/etc/ldap.cfg.template +30 -0
  436. rucio-35.7.0.data/data/rucio/etc/mail_templates/rule_approval_request.tmpl +38 -0
  437. rucio-35.7.0.data/data/rucio/etc/mail_templates/rule_approved_admin.tmpl +4 -0
  438. rucio-35.7.0.data/data/rucio/etc/mail_templates/rule_approved_user.tmpl +17 -0
  439. rucio-35.7.0.data/data/rucio/etc/mail_templates/rule_denied_admin.tmpl +6 -0
  440. rucio-35.7.0.data/data/rucio/etc/mail_templates/rule_denied_user.tmpl +17 -0
  441. rucio-35.7.0.data/data/rucio/etc/mail_templates/rule_ok_notification.tmpl +19 -0
  442. rucio-35.7.0.data/data/rucio/etc/rse-accounts.cfg.template +25 -0
  443. rucio-35.7.0.data/data/rucio/etc/rucio.cfg.atlas.client.template +42 -0
  444. rucio-35.7.0.data/data/rucio/etc/rucio.cfg.template +257 -0
  445. rucio-35.7.0.data/data/rucio/etc/rucio_multi_vo.cfg.template +234 -0
  446. rucio-35.7.0.data/data/rucio/requirements.server.txt +268 -0
  447. rucio-35.7.0.data/data/rucio/tools/bootstrap.py +34 -0
  448. rucio-35.7.0.data/data/rucio/tools/merge_rucio_configs.py +144 -0
  449. rucio-35.7.0.data/data/rucio/tools/reset_database.py +40 -0
  450. rucio-35.7.0.data/scripts/rucio +2542 -0
  451. rucio-35.7.0.data/scripts/rucio-abacus-account +74 -0
  452. rucio-35.7.0.data/scripts/rucio-abacus-collection-replica +46 -0
  453. rucio-35.7.0.data/scripts/rucio-abacus-rse +78 -0
  454. rucio-35.7.0.data/scripts/rucio-admin +2447 -0
  455. rucio-35.7.0.data/scripts/rucio-atropos +60 -0
  456. rucio-35.7.0.data/scripts/rucio-auditor +205 -0
  457. rucio-35.7.0.data/scripts/rucio-automatix +50 -0
  458. rucio-35.7.0.data/scripts/rucio-bb8 +57 -0
  459. rucio-35.7.0.data/scripts/rucio-c3po +85 -0
  460. rucio-35.7.0.data/scripts/rucio-cache-client +134 -0
  461. rucio-35.7.0.data/scripts/rucio-cache-consumer +42 -0
  462. rucio-35.7.0.data/scripts/rucio-conveyor-finisher +58 -0
  463. rucio-35.7.0.data/scripts/rucio-conveyor-poller +66 -0
  464. rucio-35.7.0.data/scripts/rucio-conveyor-preparer +37 -0
  465. rucio-35.7.0.data/scripts/rucio-conveyor-receiver +43 -0
  466. rucio-35.7.0.data/scripts/rucio-conveyor-stager +76 -0
  467. rucio-35.7.0.data/scripts/rucio-conveyor-submitter +139 -0
  468. rucio-35.7.0.data/scripts/rucio-conveyor-throttler +104 -0
  469. rucio-35.7.0.data/scripts/rucio-dark-reaper +53 -0
  470. rucio-35.7.0.data/scripts/rucio-dumper +160 -0
  471. rucio-35.7.0.data/scripts/rucio-follower +44 -0
  472. rucio-35.7.0.data/scripts/rucio-hermes +54 -0
  473. rucio-35.7.0.data/scripts/rucio-judge-cleaner +89 -0
  474. rucio-35.7.0.data/scripts/rucio-judge-evaluator +137 -0
  475. rucio-35.7.0.data/scripts/rucio-judge-injector +44 -0
  476. rucio-35.7.0.data/scripts/rucio-judge-repairer +44 -0
  477. rucio-35.7.0.data/scripts/rucio-kronos +43 -0
  478. rucio-35.7.0.data/scripts/rucio-minos +53 -0
  479. rucio-35.7.0.data/scripts/rucio-minos-temporary-expiration +50 -0
  480. rucio-35.7.0.data/scripts/rucio-necromancer +120 -0
  481. rucio-35.7.0.data/scripts/rucio-oauth-manager +63 -0
  482. rucio-35.7.0.data/scripts/rucio-reaper +83 -0
  483. rucio-35.7.0.data/scripts/rucio-replica-recoverer +248 -0
  484. rucio-35.7.0.data/scripts/rucio-rse-decommissioner +66 -0
  485. rucio-35.7.0.data/scripts/rucio-storage-consistency-actions +74 -0
  486. rucio-35.7.0.data/scripts/rucio-transmogrifier +77 -0
  487. rucio-35.7.0.data/scripts/rucio-undertaker +76 -0
  488. rucio-35.7.0.dist-info/METADATA +72 -0
  489. rucio-35.7.0.dist-info/RECORD +493 -0
  490. rucio-35.7.0.dist-info/WHEEL +5 -0
  491. rucio-35.7.0.dist-info/licenses/AUTHORS.rst +97 -0
  492. rucio-35.7.0.dist-info/licenses/LICENSE +201 -0
  493. rucio-35.7.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1618 @@
1
+ # Copyright European Organization for Nuclear Research (CERN) since 2012
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ from collections.abc import Sequence
17
+ from datetime import datetime
18
+ from typing import TYPE_CHECKING, Any, Optional
19
+
20
+ from sqlalchemy import and_, func, select
21
+ from sqlalchemy.exc import NoResultFound
22
+
23
+ import rucio.core.did
24
+ import rucio.core.lock
25
+ import rucio.core.replica
26
+ from rucio.common.config import config_get_int
27
+ from rucio.common.constants import RseAttr
28
+ from rucio.common.exception import InsufficientTargetRSEs
29
+ from rucio.common.types import InternalScope
30
+ from rucio.core import account_counter, rse_counter
31
+ from rucio.core import request as request_core
32
+ from rucio.core.rse import get_rse, get_rse_attribute, get_rse_name
33
+ from rucio.core.rse_selector import RSESelector
34
+ from rucio.db.sqla import models
35
+ from rucio.db.sqla.constants import OBSOLETE, DIDType, LockState, ReplicaState, RequestType, RuleGrouping
36
+ from rucio.db.sqla.session import transactional_session
37
+
38
+ if TYPE_CHECKING:
39
+ from sqlalchemy.orm import Session
40
+
41
+
42
@transactional_session
def apply_rule_grouping(
        datasetfiles: Sequence[dict[str, Any]],
        locks: dict[tuple[InternalScope, str], Sequence[models.ReplicaLock]],
        replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        source_replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        rseselector: RSESelector,
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional[Sequence[str]] = None,
        source_rses: Optional[Sequence[str]] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply rule grouping to files.

    Dispatches to the grouping-specific implementation selected by
    ``rule.grouping`` (NONE, ALL, or anything else is treated as DATASET).

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source_replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 Dict of replicas to create, Dict of locks to create, List of transfers to create
    :raises:                  InsufficientQuota, InsufficientTargetRSEs, RSEOverQuota
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """
    # Shapes of the returned structures:
    #   locks_to_create     = {'rse_id': [locks]}
    #   replicas_to_create  = {'rse_id': [replicas]}
    #   transfers_to_create = [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]

    if rule.grouping == RuleGrouping.NONE:
        grouping_impl = __apply_rule_to_files_none_grouping
    elif rule.grouping == RuleGrouping.ALL:
        grouping_impl = __apply_rule_to_files_all_grouping
    else:  # rule.grouping == RuleGrouping.DATASET
        grouping_impl = __apply_rule_to_files_dataset_grouping

    replicas_to_create, locks_to_create, transfers_to_create = grouping_impl(
        datasetfiles=datasetfiles,
        locks=locks,
        replicas=replicas,
        source_replicas=source_replicas,
        rseselector=rseselector,
        rule=rule,
        preferred_rse_ids=preferred_rse_ids or [],
        source_rses=source_rses or [],
        session=session)

    return replicas_to_create, locks_to_create, transfers_to_create
115
+
116
+
117
@transactional_session
def repair_stuck_locks_and_apply_rule_grouping(
        datasetfiles: Sequence[dict[str, Any]],
        locks: dict[tuple[InternalScope, str], models.ReplicaLock],
        replicas: dict[tuple[InternalScope, str], Any],
        source_replicas: dict[tuple[InternalScope, str], Any],
        rseselector: RSESelector, rule: models.ReplicationRule,
        source_rses: Sequence[str],
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]],
           dict[str, list[dict[str, models.ReplicaLock]]]]:
    """
    Apply rule grouping to files.

    Dispatches to the grouping-specific stuck-lock repair implementation
    selected by ``rule.grouping`` (NONE, ALL, or anything else is treated
    as DATASET).

    :param datasetfiles:    Dict holding all datasets and files.
    :param locks:           Dict holding all locks.
    :param replicas:        Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector:     The RSESelector to be used.
    :param rule:            The rule object.
    :param source_rses:     RSE ids of eligible source_rses.
    :param session:         Session of the db.
    :returns:               List of replicas to create, List of locks to create, List of transfers to create, List of locks to Delete
    :raises:                InsufficientQuota, InsufficientTargetRSEs
    :attention:             This method modifies the contents of the locks and replicas input parameters.
    """
    # Shapes of the returned structures:
    #   locks_to_create     = {'rse_id': [locks]}
    #   replicas_to_create  = {'rse_id': [replicas]}
    #   transfers_to_create = [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    #   locks_to_delete     = {'rse_id': [locks]}

    if rule.grouping == RuleGrouping.NONE:
        repair_impl = __repair_stuck_locks_with_none_grouping
    elif rule.grouping == RuleGrouping.ALL:
        repair_impl = __repair_stuck_locks_with_all_grouping
    else:
        repair_impl = __repair_stuck_locks_with_dataset_grouping

    replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete = repair_impl(
        datasetfiles=datasetfiles,
        locks=locks,
        replicas=replicas,
        source_replicas=source_replicas,
        rseselector=rseselector,
        rule=rule,
        source_rses=source_rses,
        session=session)

    return replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
183
+
184
+
185
@transactional_session
def create_transfer_dict(dest_rse_id, request_type, scope, name, rule, lock=None, bytes_=None, md5=None, adler32=None, ds_scope=None, ds_name=None, copy_pin_lifetime=None, activity=None, retry_count=None, *, session: "Session"):
    """
    This method creates a transfer dictionary and returns it

    :param dest_rse_id: The destination RSE id.
    :param request_type: The request type.
    :param scope: The scope of the file.
    :param name: The name of the file.
    :param rule: The rule responsible for the transfer.
    :param lock: The lock responsible for the transfer.
    :param bytes_: The filesize of the file in bytes.
    :param md5: The md5 checksum of the file.
    :param adler32: The adler32 checksum of the file.
    :param ds_scope: Dataset the file belongs to.
    :param ds_name: Dataset the file belongs to.
    :param copy_pin_lifetime: Lifetime in the case of STAGING requests.
    :param activity: Activity to be used.
    :param retry_count: Retry count carried over into the request dictionary.
    :param session: Session of the db.
    :returns: Request dictionary.
    """
    # Transfer attributes; activity falls back to the rule's activity, then 'default'.
    attributes = {'activity': activity or rule.activity or 'default',
                  'source_replica_expression': rule.source_replica_expression,
                  'lifetime': copy_pin_lifetime,
                  'ds_scope': ds_scope,
                  'ds_name': ds_name,
                  'bytes': bytes_,
                  'md5': md5,
                  'adler32': adler32,
                  'priority': rule.priority,
                  # 'allow_tape_source': has_account_attribute(account=rule.account, key='admin', session=session)}
                  # NOTE: tape sources are unconditionally allowed; the admin-attribute
                  # check above was disabled (kept for reference).
                  'allow_tape_source': True}

    # requested_at: prefer the lock's creation time, fall back to the rule's.
    return {'dest_rse_id': dest_rse_id,
            'scope': scope,
            'name': name,
            'rule_id': rule.id,
            'attributes': attributes,
            'request_type': request_type,
            'retry_count': retry_count,
            'account': rule.account,
            'requested_at': lock.created_at if lock else rule.created_at}
227
+
228
+
229
@transactional_session
def __apply_rule_to_files_none_grouping(
        datasetfiles: Sequence[dict[str, Any]],
        locks: dict[tuple[InternalScope, str], Sequence[models.ReplicaLock]],
        replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        source_replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        rseselector: RSESelector,
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional[Sequence[str]] = None,
        source_rses: Optional[Sequence[str]] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply a rule to files with NONE grouping.

    Each file is placed independently: the RSE selection is done per file,
    so different files of the same dataset may end up on different RSEs.

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source_replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 replicas_to_create, locks_to_create, transfers_to_create
    :raises:                  InsufficientAccountLimit, InsufficientTargetRSEs
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """
    locks_to_create = {}  # {'rse_id': [locks]}
    replicas_to_create = {}  # {'rse_id': [replicas]}
    transfers_to_create = []  # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    preferred_rse_ids = preferred_rse_ids or []
    source_rses = source_rses or []

    for dataset in datasetfiles:
        selected_rse_ids = []
        for file in dataset['files']:
            if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
                # Nothing to do as the file already has the requested amount of locks
                continue
            # RSEs already holding a usable copy of this file, mapped to the covered size
            rse_coverage = {str(replica.rse_id): file['bytes'] for replica in replicas[(file['scope'], file['name'])] if replica.state in (ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE)}
            # Without explicit preferences, prefer RSEs that already have the file;
            # RSEs whose replica is being deleted are excluded either way.
            if len(preferred_rse_ids) == 0:
                rse_tuples = rseselector.select_rse(size=file['bytes'],
                                                    preferred_rse_ids=rse_coverage.keys(),
                                                    blocklist=[str(replica.rse_id) for replica in replicas[(file['scope'], file['name'])] if replica.state == ReplicaState.BEING_DELETED],
                                                    existing_rse_size=rse_coverage)
            else:
                rse_tuples = rseselector.select_rse(size=file['bytes'],
                                                    preferred_rse_ids=preferred_rse_ids,
                                                    blocklist=[str(replica.rse_id) for replica in replicas[(file['scope'], file['name'])] if replica.state == ReplicaState.BEING_DELETED],
                                                    existing_rse_size=rse_coverage)
            # rse_tuple is (rse_id, staging_area, availability_write)
            for rse_tuple in rse_tuples:
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_tuple[0]]) == 1:
                    # Due to a bug a lock could have been already submitted for this, in that case, skip it
                    continue
                __create_lock_and_replica(file=file,
                                          dataset=dataset,
                                          rule=rule,
                                          rse_id=rse_tuple[0],
                                          staging_area=rse_tuple[1],
                                          availability_write=rse_tuple[2],
                                          locks_to_create=locks_to_create,
                                          locks=locks,
                                          source_rses=source_rses,
                                          replicas_to_create=replicas_to_create,
                                          replicas=replicas,
                                          source_replicas=source_replicas,
                                          transfers_to_create=transfers_to_create,
                                          session=session)
                selected_rse_ids.append(rse_tuple[0])
        if dataset['scope'] is not None:
            # Ensure a CollectionReplica row exists for every RSE that received file locks
            for rse_id in list(set(selected_rse_ids)):
                try:
                    stmt = select(
                        models.CollectionReplica
                    ).where(
                        and_(models.CollectionReplica.scope == dataset['scope'],
                             models.CollectionReplica.name == dataset['name'],
                             models.CollectionReplica.rse_id == rse_id)
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    # No dataset replica on this RSE yet: create an empty one and queue
                    # an UpdatedCollectionReplica so the counters get recalculated.
                    models.CollectionReplica(scope=dataset['scope'],
                                             name=dataset['name'],
                                             did_type=DIDType.DATASET,
                                             rse_id=rse_id,
                                             bytes=0,
                                             length=0,
                                             available_bytes=0,
                                             available_replicas_cnt=0,
                                             state=ReplicaState.UNAVAILABLE).save(session=session)
                    models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                    name=dataset['name'],
                                                    did_type=DIDType.DATASET).save(flush=False, session=session)

    return replicas_to_create, locks_to_create, transfers_to_create
328
+
329
+
330
@transactional_session
def __apply_rule_to_files_all_grouping(
        datasetfiles: Sequence[dict[str, Any]],
        locks: dict[tuple[InternalScope, str], Sequence[models.ReplicaLock]],
        replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        source_replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        rseselector: RSESelector,
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional[Sequence[str]] = None,
        source_rses: Optional[Sequence[str]] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply a rule to files with ALL grouping.

    All files of all datasets are placed together: one RSE selection is done
    for the combined size, and every file gets a lock on each selected RSE.

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source_replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 replicas_to_create, locks_to_create, transfers_to_create
    :raises:                  InsufficientQuota, InsufficientTargetRSEs
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """

    locks_to_create = {}  # {'rse_id': [locks]}
    replicas_to_create = {}  # {'rse_id': [replicas]}
    transfers_to_create = []  # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    preferred_rse_ids = preferred_rse_ids or []
    source_rses = source_rses or []

    # First pass: accumulate the total size, per-RSE coverage of usable replicas,
    # and the set of RSEs with replicas being deleted (blocklisted for selection).
    bytes_ = 0
    rse_coverage = {}  # {'rse_id': coverage }
    blocklist = set()
    for dataset in datasetfiles:
        for file in dataset['files']:
            bytes_ += file['bytes']
            for replica in replicas[(file['scope'], file['name'])]:
                if replica.state == ReplicaState.BEING_DELETED:
                    blocklist.add(replica.rse_id)
                    continue
                if replica.state in [ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE]:
                    if replica.rse_id in rse_coverage:
                        rse_coverage[replica.rse_id] += file['bytes']
                    else:
                        rse_coverage[replica.rse_id] = file['bytes']

    # Single selection for the whole rule; without explicit preferences, RSEs
    # are preferred by descending coverage of already-present data.
    if not preferred_rse_ids:
        rse_tuples = rseselector.select_rse(size=bytes_,
                                            preferred_rse_ids=[x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)],
                                            blocklist=list(blocklist),
                                            prioritize_order_over_weight=True,
                                            existing_rse_size=rse_coverage)
    else:
        rse_tuples = rseselector.select_rse(size=bytes_,
                                            preferred_rse_ids=preferred_rse_ids,
                                            blocklist=list(blocklist),
                                            existing_rse_size=rse_coverage)
    # rse_tuple is (rse_id, staging_area, availability_write)
    for rse_tuple in rse_tuples:
        for dataset in datasetfiles:
            for file in dataset['files']:
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
                    continue
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_tuple[0]]) == 1:
                    # Due to a bug a lock could have been already submitted for this, in that case, skip it
                    continue
                __create_lock_and_replica(file=file,
                                          dataset=dataset,
                                          rule=rule,
                                          rse_id=rse_tuple[0],
                                          staging_area=rse_tuple[1],
                                          availability_write=rse_tuple[2],
                                          locks_to_create=locks_to_create,
                                          locks=locks,
                                          source_rses=source_rses,
                                          replicas_to_create=replicas_to_create,
                                          replicas=replicas,
                                          source_replicas=source_replicas,
                                          transfers_to_create=transfers_to_create,
                                          session=session)
            # Add a DatasetLock to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.DatasetLock
                    ).where(
                        and_(models.DatasetLock.scope == dataset['scope'],
                             models.DatasetLock.name == dataset['name'],
                             models.DatasetLock.rule_id == rule.id,
                             models.DatasetLock.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    # Get dataset Information
                    # NOTE(review): the fallback here is (True, 0, 0) while the
                    # DATASET-grouping variant uses (True, None, None); with
                    # is_open=True both end up storing NULL length/bytes, but the
                    # inconsistency is worth confirming. Also note this rebinds
                    # bytes_ (the rule-wide total computed above) — only safe
                    # because the RSE selection has already happened.
                    is_open, bytes_, length = True, 0, 0
                    try:
                        stmt = select(
                            models.DataIdentifier.is_open,
                            models.DataIdentifier.bytes,
                            models.DataIdentifier.length
                        ).where(
                            and_(models.DataIdentifier.scope == dataset['scope'],
                                 models.DataIdentifier.name == dataset['name'])
                        )
                        is_open, bytes_, length = session.execute(stmt).one()
                    except NoResultFound:
                        pass

                    # length/bytes are only pinned for closed datasets
                    models.DatasetLock(scope=dataset['scope'],
                                       name=dataset['name'],
                                       rule_id=rule.id,
                                       rse_id=rse_tuple[0],
                                       state=LockState.REPLICATING,
                                       account=rule.account,
                                       length=length if not is_open else None,
                                       bytes=bytes_ if not is_open else None).save(flush=False, session=session)
            # Add a Dataset Replica to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.CollectionReplica
                    ).where(
                        and_(models.CollectionReplica.scope == dataset['scope'],
                             models.CollectionReplica.name == dataset['name'],
                             models.CollectionReplica.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    # No dataset replica on this RSE yet: create an empty one and queue
                    # an UpdatedCollectionReplica so the counters get recalculated.
                    models.CollectionReplica(scope=dataset['scope'],
                                             name=dataset['name'],
                                             did_type=DIDType.DATASET,
                                             rse_id=rse_tuple[0],
                                             bytes=0,
                                             length=0,
                                             available_bytes=0,
                                             available_replicas_cnt=0,
                                             state=ReplicaState.UNAVAILABLE).save(session=session)
                    models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                    name=dataset['name'],
                                                    did_type=DIDType.DATASET).save(flush=False, session=session)

    return replicas_to_create, locks_to_create, transfers_to_create
479
+
480
+
481
@transactional_session
def __apply_rule_to_files_dataset_grouping(
        datasetfiles: Sequence[dict[str, Any]],
        locks: dict[tuple[InternalScope, str], Sequence[models.ReplicaLock]],
        replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        source_replicas: dict[tuple[InternalScope, str], Sequence[models.CollectionReplica]],
        rseselector: RSESelector,
        rule: models.ReplicationRule,
        preferred_rse_ids: Optional[Sequence[str]] = None,
        source_rses: Optional[Sequence[str]] = None,
        *,
        session: "Session"
) -> tuple[dict[str, list[dict[str, models.RSEFileAssociation]]],
           dict[str, list[dict[str, models.ReplicaLock]]],
           list[dict[str, Any]]]:
    """
    Apply a rule to files with DATASET grouping.

    RSE selection is done once per dataset: all files of one dataset go to the
    same RSE(s), but different datasets may be placed on different RSEs.

    :param datasetfiles:      Dict holding all datasets and files.
    :param locks:             Dict holding all locks.
    :param replicas:          Dict holding all replicas.
    :param source_replicas:   Dict holding all source replicas.
    :param rseselector:       The RSESelector to be used.
    :param rule:              The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses:       RSE ids of eligible source replicas.
    :param session:           Session of the db.
    :returns:                 replicas_to_create, locks_to_create, transfers_to_create
    :raises:                  InsufficientQuota, InsufficientTargetRSEs
    :attention:               This method modifies the contents of the locks and replicas input parameters.
    """
    locks_to_create = {}  # {'rse_id': [locks]}
    replicas_to_create = {}  # {'rse_id': [replicas]}
    transfers_to_create = []  # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    preferred_rse_ids = preferred_rse_ids or []
    source_rses = source_rses or []

    for dataset in datasetfiles:
        # Per-dataset totals: size, coverage of usable replicas per RSE, and
        # RSEs blocklisted because a replica there is being deleted.
        bytes_ = sum([file['bytes'] for file in dataset['files']])
        rse_coverage = {}  # {'rse_id': coverage }
        blocklist = set()
        for file in dataset['files']:
            for replica in replicas[(file['scope'], file['name'])]:
                if replica.state == ReplicaState.BEING_DELETED:
                    blocklist.add(replica.rse_id)
                    continue
                if replica.state in [ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE]:
                    if replica.rse_id in rse_coverage:
                        rse_coverage[replica.rse_id] += file['bytes']
                    else:
                        rse_coverage[replica.rse_id] = file['bytes']

        # One selection per dataset; without explicit preferences, RSEs are
        # preferred by descending coverage of already-present data.
        if not preferred_rse_ids:
            rse_tuples = rseselector.select_rse(size=bytes_,
                                                preferred_rse_ids=[x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)],
                                                blocklist=list(blocklist),
                                                prioritize_order_over_weight=True,
                                                existing_rse_size=rse_coverage)
        else:
            rse_tuples = rseselector.select_rse(size=bytes_,
                                                preferred_rse_ids=preferred_rse_ids,
                                                blocklist=list(blocklist),
                                                existing_rse_size=rse_coverage)
        # rse_tuple is (rse_id, staging_area, availability_write)
        for rse_tuple in rse_tuples:
            for file in dataset['files']:
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
                    continue
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_tuple[0]]) == 1:
                    # Due to a bug a lock could have been already submitted for this, in that case, skip it
                    continue
                __create_lock_and_replica(file=file,
                                          dataset=dataset,
                                          rule=rule,
                                          rse_id=rse_tuple[0],
                                          staging_area=rse_tuple[1],
                                          availability_write=rse_tuple[2],
                                          locks_to_create=locks_to_create,
                                          locks=locks,
                                          source_rses=source_rses,
                                          replicas_to_create=replicas_to_create,
                                          replicas=replicas,
                                          source_replicas=source_replicas,
                                          transfers_to_create=transfers_to_create,
                                          session=session)
            # Add a DatasetLock to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.DatasetLock
                    ).where(
                        and_(models.DatasetLock.scope == dataset['scope'],
                             models.DatasetLock.name == dataset['name'],
                             models.DatasetLock.rule_id == rule.id,
                             models.DatasetLock.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    # Get dataset Information
                    # NOTE: rebinds bytes_ (the per-dataset total computed above) —
                    # only safe because the RSE selection has already happened.
                    is_open, bytes_, length = True, None, None
                    try:
                        stmt = select(
                            models.DataIdentifier.is_open,
                            models.DataIdentifier.bytes,
                            models.DataIdentifier.length
                        ).where(
                            and_(models.DataIdentifier.scope == dataset['scope'],
                                 models.DataIdentifier.name == dataset['name'])
                        )
                        is_open, bytes_, length = session.execute(stmt).one()
                    except NoResultFound:
                        pass

                    # length/bytes are only pinned for closed datasets
                    models.DatasetLock(scope=dataset['scope'],
                                       name=dataset['name'],
                                       rule_id=rule.id,
                                       rse_id=rse_tuple[0],
                                       state=LockState.REPLICATING,
                                       account=rule.account,
                                       length=length if not is_open else None,
                                       bytes=bytes_ if not is_open else None).save(flush=False, session=session)

            # Add a Dataset Replica to the DB
            if dataset['scope'] is not None:
                try:
                    stmt = select(
                        models.CollectionReplica
                    ).where(
                        and_(models.CollectionReplica.scope == dataset['scope'],
                             models.CollectionReplica.name == dataset['name'],
                             models.CollectionReplica.rse_id == rse_tuple[0])
                    )
                    session.execute(stmt).one()
                except NoResultFound:
                    # No dataset replica on this RSE yet: create an empty one and queue
                    # an UpdatedCollectionReplica so the counters get recalculated.
                    models.CollectionReplica(scope=dataset['scope'],
                                             name=dataset['name'],
                                             did_type=DIDType.DATASET,
                                             rse_id=rse_tuple[0],
                                             bytes=0,
                                             length=0,
                                             available_bytes=0,
                                             available_replicas_cnt=0,
                                             state=ReplicaState.UNAVAILABLE).save(session=session)
                    models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                    name=dataset['name'],
                                                    did_type=DIDType.DATASET).save(flush=False, session=session)

    return replicas_to_create, locks_to_create, transfers_to_create
628
+
629
+
630
@transactional_session
def __repair_stuck_locks_with_none_grouping(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, *, session: "Session", logger=logging.log):
    """
    Apply a rule to files with NONE grouping.

    Walks all STUCK locks of the rule and, per lock, either deletes it
    (too many locks already), marks it OK (replica became available),
    retries it via the source-replica path, moves it to a different RSE,
    or re-submits the transfer for the existing lock.

    :param datasetfiles:    Dict holding all datasets and files.
    :param locks:           Dict holding all locks.
    :param replicas:        Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector:     The RSESelector to be used.
    :param rule:            The rule object.
    :param source_rses:     RSE ids of eligible source replicas.
    :param session:         Session of the db.
    :param logger:          Optional decorated logger that can be passed from the calling daemons or servers.
    :returns:               replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
    :raises:                InsufficientAccountLimit, InsufficientTargetRSEs
    :attention:             This method modifies the contents of the locks and replicas input parameters.
    """

    locks_to_create = {}  # {'rse_id': [locks]}
    replicas_to_create = {}  # {'rse_id': [replicas]}
    transfers_to_create = []  # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    locks_to_delete = {}  # {'rse_id': [locks]}

    selector_rse_dict = rseselector.get_rse_dictionary()

    # Iterate the datasetfiles structure and search for stuck locks
    for dataset in datasetfiles:
        for file in dataset['files']:
            # Iterate and try to repair STUCK locks
            for lock in [stucked_lock for stucked_lock in locks[(file['scope'], file['name'])] if stucked_lock.rule_id == rule.id and stucked_lock.state == LockState.STUCK]:
                # Check if there are actually already enough locks
                if len([good_lock for good_lock in locks[(file['scope'], file['name'])] if good_lock.rule_id == rule.id and good_lock.state != LockState.STUCK]) >= rule.copies:
                    # Remove the lock
                    logger(logging.DEBUG, 'There are too many locks for %s:%s for rule %s. Deleting lock', file['scope'], file['name'], str(rule.id))
                    if lock.rse_id in locks_to_delete:
                        locks_to_delete[lock.rse_id].append(lock)
                    else:
                        locks_to_delete[lock.rse_id] = [lock]
                    rule.locks_stuck_cnt -= 1
                    continue
                # Check if the replica is AVAILABLE now
                if [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id]:
                    # Replica appeared in the meantime: the lock can simply be marked OK.
                    lock.state = LockState.OK
                    rule.locks_stuck_cnt -= 1
                    rule.locks_ok_cnt += 1
                    # Recalculate the replica_lock_cnt
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id][0]
                    # Clear the tombstone so the replica is not eligible for deletion.
                    associated_replica.tombstone = None
                    stmt = select(
                        func.count(models.ReplicaLock.rule_id)
                    ).select_from(
                        models.ReplicaLock
                    ).where(
                        and_(models.ReplicaLock.scope == associated_replica.scope,
                             models.ReplicaLock.name == associated_replica.name,
                             models.ReplicaLock.rse_id == lock.rse_id)
                    )
                    associated_replica.lock_cnt = session.execute(stmt).scalar_one()
                    continue
                # Check if this is a STUCK lock due to source_replica filtering
                if source_rses:
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                    # Check if there is an eligible source replica for this lock
                    if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        # A source became eligible: retry the transfer for the existing lock.
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)
                else:
                    # Try to move the lock to a different RSE; all RSEs that
                    # already hold a lock of this rule (plus the current one and
                    # RSEs with replicas being deleted) are blocklisted.
                    blocklist_rses = [bl_lock.rse_id for bl_lock in locks[(file['scope'], file['name'])] if bl_lock.rule_id == rule.id]
                    try:
                        rse_coverage = {replica.rse_id: file['bytes'] for replica in replicas[(file['scope'], file['name'])] if replica.state in (ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE)}
                        rse_tuples = rseselector.select_rse(size=file['bytes'],
                                                            preferred_rse_ids=rse_coverage.keys(),
                                                            copies=1,
                                                            blocklist=[replica.rse_id for replica in replicas[(file['scope'], file['name'])] if replica.state == ReplicaState.BEING_DELETED] + blocklist_rses + [lock.rse_id],
                                                            existing_rse_size=rse_coverage)
                        # rse_tuple is (rse_id, staging_area, availability_write)
                        for rse_tuple in rse_tuples:
                            __create_lock_and_replica(file=file,
                                                      dataset=dataset,
                                                      rule=rule,
                                                      rse_id=rse_tuple[0],
                                                      staging_area=rse_tuple[1],
                                                      availability_write=rse_tuple[2],
                                                      locks_to_create=locks_to_create,
                                                      locks=locks,
                                                      source_rses=source_rses,
                                                      replicas_to_create=replicas_to_create,
                                                      replicas=replicas,
                                                      source_replicas=source_replicas,
                                                      transfers_to_create=transfers_to_create,
                                                      session=session)
                            rule.locks_stuck_cnt -= 1
                            # The stuck lock's old replica is released and the lock deleted.
                            __set_replica_unavailable(replica=[replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0],
                                                      session=session)
                            if lock.rse_id in locks_to_delete:
                                locks_to_delete[lock.rse_id].append(lock)
                            else:
                                locks_to_delete[lock.rse_id] = [lock]
                    except InsufficientTargetRSEs:
                        # Just retry the already existing lock
                        if __is_retry_required(lock=lock, activity=rule.activity) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                            associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                            __update_lock_replica_and_create_transfer(lock=lock,
                                                                      replica=associated_replica,
                                                                      rule=rule,
                                                                      dataset=dataset,
                                                                      transfers_to_create=transfers_to_create,
                                                                      session=session)

    return replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
744
+
745
+
746
@transactional_session
def __repair_stuck_locks_with_all_grouping(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, *, session: "Session", logger=logging.log):
    """
    Repair STUCK locks of a rule with ALL grouping.

    Each stuck lock is either deleted (when the rule already has enough
    non-stuck locks), promoted to OK (when an available replica has appeared
    at the lock's RSE), or its transfer is re-issued at the same RSE.

    :param datasetfiles: Dict holding all datasets and files.
    :param locks: Dict holding all locks.
    :param replicas: Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector: The RSESelector to be used.
    :param rule: The rule object.
    :param source_rses: RSE ids of eligible source replicas.
    :param session: Session of the db.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
    :raises: InsufficientAccountLimit, InsufficientTargetRSEs
    :attention: This method modifies the contents of the locks and replicas input parameters.
    """

    # With ALL grouping no new locks/replicas are ever created here; these two
    # dicts stay empty and are returned only to keep the interface uniform
    # with the NONE-grouping variant.
    locks_to_create = {}            # {'rse_id': [locks]}
    replicas_to_create = {}         # {'rse_id': [replicas]}
    transfers_to_create = []        # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    locks_to_delete = {}            # {'rse_id': [locks]}

    selector_rse_dict = rseselector.get_rse_dictionary()

    # Iterate the datasetfiles structure and search for stuck locks
    for dataset in datasetfiles:
        for file in dataset['files']:
            # Iterate and try to repair STUCK locks
            for lock in [stucked_lock for stucked_lock in locks[(file['scope'], file['name'])] if stucked_lock.rule_id == rule.id and stucked_lock.state == LockState.STUCK]:
                # Check if there are actually already enough locks
                if len([good_lock for good_lock in locks[(file['scope'], file['name'])] if good_lock.rule_id == rule.id and good_lock.state != LockState.STUCK]) >= rule.copies:
                    # Remove the lock
                    logger(logging.DEBUG, 'There are too many locks for %s:%s for rule %s. Deleting lock', file['scope'], file['name'], str(rule.id))
                    if lock.rse_id in locks_to_delete:
                        locks_to_delete[lock.rse_id].append(lock)
                    else:
                        locks_to_delete[lock.rse_id] = [lock]
                    rule.locks_stuck_cnt -= 1
                    continue
                # Check if the replica is AVAILABLE now
                if [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id]:
                    lock.state = LockState.OK
                    rule.locks_stuck_cnt -= 1
                    rule.locks_ok_cnt += 1
                    # Recalculate the replica_lock_cnt from the DB, since the
                    # in-memory counter may be out of date for a repaired lock
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id][0]
                    associated_replica.tombstone = None
                    stmt = select(
                        func.count(models.ReplicaLock.rule_id)
                    ).select_from(
                        models.ReplicaLock
                    ).where(
                        and_(models.ReplicaLock.scope == associated_replica.scope,
                             models.ReplicaLock.name == associated_replica.name,
                             models.ReplicaLock.rse_id == lock.rse_id)
                    )
                    associated_replica.lock_cnt = session.execute(stmt).scalar_one()
                    continue
                # Check if this is a STUCK lock due to source_replica filtering
                if source_rses:
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                    # Check if there is an eligible source replica for this lock
                    if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)
                else:
                    # No source-RSE filtering: just retry the already existing lock
                    # once enough time has passed for the rule's activity
                    if __is_retry_required(lock=lock, activity=rule.activity) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)

    return replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
829
+
830
+
831
@transactional_session
def __repair_stuck_locks_with_dataset_grouping(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, *, session: "Session", logger=logging.log):
    """
    Repair STUCK locks of a rule with DATASET grouping.

    Each stuck lock is either deleted (when the rule already has enough
    non-stuck locks), promoted to OK (when an available replica has appeared
    at the lock's RSE), or its transfer is re-issued at the same RSE.

    :param datasetfiles: Dict holding all datasets and files.
    :param locks: Dict holding all locks.
    :param replicas: Dict holding all replicas.
    :param source_replicas: Dict holding all source_replicas.
    :param rseselector: The RSESelector to be used.
    :param rule: The rule object.
    :param source_rses: RSE ids of eligible source replicas.
    :param session: Session of the db.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
    :raises: InsufficientAccountLimit, InsufficientTargetRSEs
    :attention: This method modifies the contents of the locks and replicas input parameters.
    """

    # With DATASET grouping no new locks/replicas are ever created here; these
    # two dicts stay empty and are returned only to keep the interface uniform
    # with the NONE-grouping variant.
    locks_to_create = {}            # {'rse_id': [locks]}
    replicas_to_create = {}         # {'rse_id': [replicas]}
    transfers_to_create = []        # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    locks_to_delete = {}            # {'rse_id': [locks]}

    selector_rse_dict = rseselector.get_rse_dictionary()

    # Iterate the datasetfiles structure and search for stuck locks
    for dataset in datasetfiles:
        for file in dataset['files']:
            # Iterate and try to repair STUCK locks
            for lock in [stucked_lock for stucked_lock in locks[(file['scope'], file['name'])] if stucked_lock.rule_id == rule.id and stucked_lock.state == LockState.STUCK]:
                # Check if there are actually already enough locks
                if len([good_lock for good_lock in locks[(file['scope'], file['name'])] if good_lock.rule_id == rule.id and good_lock.state != LockState.STUCK]) >= rule.copies:
                    # Remove the lock
                    logger(logging.DEBUG, 'There are too many locks for %s:%s for rule %s. Deleting lock', file['scope'], file['name'], str(rule.id))
                    if lock.rse_id in locks_to_delete:
                        locks_to_delete[lock.rse_id].append(lock)
                    else:
                        locks_to_delete[lock.rse_id] = [lock]
                    rule.locks_stuck_cnt -= 1
                    continue
                # Check if the replica is AVAILABLE now
                if [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id]:
                    lock.state = LockState.OK
                    rule.locks_stuck_cnt -= 1
                    rule.locks_ok_cnt += 1
                    # Recalculate the replica_lock_cnt from the DB, since the
                    # in-memory counter may be out of date for a repaired lock
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE] and replica.rse_id == lock.rse_id][0]
                    associated_replica.tombstone = None
                    stmt = select(
                        func.count(models.ReplicaLock.rule_id)
                    ).select_from(
                        models.ReplicaLock
                    ).where(
                        and_(models.ReplicaLock.scope == associated_replica.scope,
                             models.ReplicaLock.name == associated_replica.name,
                             models.ReplicaLock.rse_id == lock.rse_id)
                    )
                    associated_replica.lock_cnt = session.execute(stmt).scalar_one()
                    continue
                # Check if this is a STUCK lock due to source_replica filtering
                if source_rses:
                    associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                    # Check if there is an eligible source replica for this lock
                    if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)
                else:
                    # No source-RSE filtering: just retry the already existing lock
                    # once enough time has passed for the rule's activity
                    if __is_retry_required(lock=lock, activity=rule.activity) and (selector_rse_dict.get(lock.rse_id, {}).get('availability_write', True) or rule.ignore_availability):
                        associated_replica = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == lock.rse_id][0]
                        __update_lock_replica_and_create_transfer(lock=lock,
                                                                  replica=associated_replica,
                                                                  rule=rule,
                                                                  dataset=dataset,
                                                                  transfers_to_create=transfers_to_create,
                                                                  session=session)

    return replicas_to_create, locks_to_create, transfers_to_create, locks_to_delete
914
+
915
+
916
def __is_retry_required(lock, activity):
    """
    Decide whether a STUCK lock's transfer should be retried.

    The retry back-off grows with the age of the lock: every 2 hours on the
    first day, 4 hours on the second, 6 hours on the third and 8 hours from
    the fourth day on.  'Express' activity always retries every 2 hours and
    'DebugJudge' retries unconditionally.

    :param lock: The lock to check.
    :param activity: The activity of the rule.
    :returns: True if the transfer should be retried, False otherwise.
    """

    # Take a single timestamp so both age computations are consistent
    # (previously datetime.utcnow() was evaluated four times).
    now = datetime.utcnow()
    created_at_diff = (now - lock.created_at).days * 24 * 3600 + (now - lock.created_at).seconds
    updated_at_diff = (now - lock.updated_at).days * 24 * 3600 + (now - lock.updated_at).seconds

    if activity == 'Express':
        # Retry every 2 hours regardless of lock age
        return updated_at_diff > 3600 * 2
    elif activity == 'DebugJudge':
        return True
    elif created_at_diff < 24 * 3600:  # First day
        # Retry every 2 hours
        return updated_at_diff > 3600 * 2
    elif created_at_diff < 2 * 24 * 3600:  # Second day
        # Retry every 4 hours
        return updated_at_diff > 3600 * 4
    elif created_at_diff < 3 * 24 * 3600:  # Third day
        # Retry every 6 hours
        return updated_at_diff > 3600 * 6
    else:  # Fourth and more days
        # Retry every 8 hours
        return updated_at_diff > 3600 * 8
946
+
947
+
948
@transactional_session
def __create_lock_and_replica(file, dataset, rule, rse_id, staging_area, availability_write, locks_to_create, locks, source_rses, replicas_to_create, replicas, source_replicas, transfers_to_create, *, session: "Session", logger=logging.log):
    """
    This method creates a lock and if necessary a new replica and fills the corresponding dictionaries.

    :param file: File dictionary holding the file information.
    :param dataset: Dataset dictionary holding the dataset information.
    :param rule: Rule object.
    :param rse_id: RSE id the lock and replica should be created at.
    :param staging_area: Boolean variable if the RSE is a staging area.
    :param availability_write: Boolean variable if the RSE is write enabled.
    :param locks_to_create: Dictionary of the locks to create.
    :param locks: Dictionary of all locks.
    :param source_rses: RSE ids of eligible source replicas.
    :param replicas_to_create: Dictionary of the replicas to create.
    :param replicas: Dictionary of the replicas.
    :param source_replicas: Dictionary of the source replicas.
    :param transfers_to_create: List of transfers to create.
    :param session: The db session in use.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: True, if the created lock is replicating, False otherwise.
    :attention: This method modifies the contents of the locks, locks_to_create, replicas_to_create and replicas input parameters.
    """

    # Pin lifetime is bounded by the rule lifetime (in seconds), if any
    if rule.expires_at:
        copy_pin_lifetime = rule.expires_at - datetime.utcnow()
        copy_pin_lifetime = copy_pin_lifetime.seconds + copy_pin_lifetime.days * 24 * 3600
    else:
        copy_pin_lifetime = None

    # If it is a Staging Area, the pin has to be extended
    if staging_area:
        transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                        request_type=RequestType.STAGEIN,
                                                        scope=file['scope'],
                                                        name=file['name'],
                                                        rule=rule,
                                                        bytes_=file['bytes'],
                                                        md5=file['md5'],
                                                        adler32=file['adler32'],
                                                        ds_scope=dataset['scope'],
                                                        ds_name=dataset['name'],
                                                        copy_pin_lifetime=copy_pin_lifetime,
                                                        session=session))

    # If staging_required type RSE then set pin to RSE attribute maximum_pin_lifetime
    staging_required = get_rse_attribute(rse_id, RseAttr.STAGING_REQUIRED, session=session)
    maximum_pin_lifetime = get_rse_attribute(rse_id, RseAttr.MAXIMUM_PIN_LIFETIME, session=session)

    if staging_required:
        # Extend the pin up to the RSE's maximum_pin_lifetime when no pin is
        # set or when the current pin is shorter than that maximum
        if (not copy_pin_lifetime and maximum_pin_lifetime) or (copy_pin_lifetime and maximum_pin_lifetime and copy_pin_lifetime < int(maximum_pin_lifetime)):
            copy_pin_lifetime = maximum_pin_lifetime
        rse_name = get_rse_name(rse_id=rse_id, session=session)
        logger(logging.DEBUG, f'Destination RSE {rse_name} is type staging_required with pin value: {copy_pin_lifetime}')

    existing_replicas = [replica for replica in replicas[(file['scope'], file['name'])] if replica.rse_id == rse_id]

    if existing_replicas:  # A replica already exists (But could be UNAVAILABLE)
        existing_replica = existing_replicas[0]

        # Replica is fully available -- AVAILABLE
        if existing_replica.state in [ReplicaState.AVAILABLE, ReplicaState.TEMPORARY_UNAVAILABLE]:
            new_lock = __create_lock(rule=rule,
                                     rse_id=rse_id,
                                     scope=file['scope'],
                                     name=file['name'],
                                     bytes_=file['bytes'],
                                     existing_replica=existing_replica,
                                     state=LockState.OK if not staging_required else LockState.REPLICATING)
            if rse_id not in locks_to_create:
                locks_to_create[rse_id] = []
            locks_to_create[rse_id].append(new_lock)
            locks[(file['scope'], file['name'])].append(new_lock)
            if not staging_required:
                return False

            # staging_required RSE: issue a STAGEIN even though the replica is
            # available.  NOTE(review): this branch falls through and the
            # function returns None (falsy) rather than an explicit bool.
            transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                            request_type=RequestType.STAGEIN,
                                                            scope=file['scope'],
                                                            name=file['name'],
                                                            rule=rule,
                                                            lock=new_lock,
                                                            bytes_=file['bytes'],
                                                            md5=file['md5'],
                                                            adler32=file['adler32'],
                                                            ds_scope=dataset['scope'],
                                                            ds_name=dataset['name'],
                                                            copy_pin_lifetime=copy_pin_lifetime,
                                                            session=session))

        # Replica is not available -- UNAVAILABLE
        elif existing_replica.state == ReplicaState.UNAVAILABLE:
            available_source_replica = True
            if source_rses:
                available_source_replica = False
                # Check if there is an eligible source replica for this lock
                if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses):
                    available_source_replica = True
            new_lock = __create_lock(rule=rule,
                                     rse_id=rse_id,
                                     scope=file['scope'],
                                     name=file['name'],
                                     bytes_=file['bytes'],
                                     existing_replica=existing_replica,
                                     state=LockState.REPLICATING if (available_source_replica and availability_write) else LockState.STUCK)
            if rse_id not in locks_to_create:
                locks_to_create[rse_id] = []
            locks_to_create[rse_id].append(new_lock)
            locks[(file['scope'], file['name'])].append(new_lock)
            if not staging_area and not staging_required and available_source_replica and availability_write:
                transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                                request_type=RequestType.TRANSFER,
                                                                scope=file['scope'],
                                                                name=file['name'],
                                                                rule=rule,
                                                                lock=new_lock,
                                                                bytes_=file['bytes'],
                                                                md5=file['md5'],
                                                                adler32=file['adler32'],
                                                                ds_scope=dataset['scope'],
                                                                ds_name=dataset['name'],
                                                                session=session))
                return True
            return False
        # Replica is not available at the rse yet -- COPYING
        else:
            # A transfer is already on-going for this replica; only a lock is added
            new_lock = __create_lock(rule=rule,
                                     rse_id=rse_id,
                                     scope=file['scope'],
                                     name=file['name'],
                                     bytes_=file['bytes'],
                                     existing_replica=existing_replica,
                                     state=LockState.REPLICATING)
            if rse_id not in locks_to_create:
                locks_to_create[rse_id] = []
            locks_to_create[rse_id].append(new_lock)
            locks[(file['scope'], file['name'])].append(new_lock)
            return True
    else:  # Replica has to be created
        available_source_replica = True
        if source_rses:
            available_source_replica = False
            # Check if there is an eligible source replica for this lock
            if set(source_replicas.get((file['scope'], file['name']), [])).intersection(source_rses):
                available_source_replica = True

        new_replica = __create_replica(rse_id=rse_id,
                                       scope=file['scope'],
                                       name=file['name'],
                                       bytes_=file['bytes'],
                                       md5=file['md5'],
                                       adler32=file['adler32'],
                                       state=ReplicaState.COPYING if (available_source_replica and availability_write) else ReplicaState.UNAVAILABLE)
        if rse_id not in replicas_to_create:
            replicas_to_create[rse_id] = []
        replicas_to_create[rse_id].append(new_replica)
        replicas[(file['scope'], file['name'])].append(new_replica)

        new_lock = __create_lock(rule=rule,
                                 rse_id=rse_id,
                                 scope=file['scope'],
                                 name=file['name'],
                                 bytes_=file['bytes'],
                                 existing_replica=new_replica,
                                 state=LockState.REPLICATING if (available_source_replica and availability_write) else LockState.STUCK)
        if rse_id not in locks_to_create:
            locks_to_create[rse_id] = []
        locks_to_create[rse_id].append(new_lock)
        locks[(file['scope'], file['name'])].append(new_lock)

        if not staging_area and not staging_required and available_source_replica and availability_write:
            transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                            request_type=RequestType.TRANSFER,
                                                            scope=file['scope'],
                                                            name=file['name'],
                                                            rule=rule,
                                                            lock=new_lock,
                                                            bytes_=file['bytes'],
                                                            md5=file['md5'],
                                                            adler32=file['adler32'],
                                                            ds_scope=dataset['scope'],
                                                            ds_name=dataset['name'],
                                                            session=session))
            return True
        elif staging_required:
            # NOTE(review): request type here is TRANSFER, not STAGEIN, unlike
            # the other staging_required paths — confirm this is intentional
            transfers_to_create.append(create_transfer_dict(dest_rse_id=rse_id,
                                                            request_type=RequestType.TRANSFER,
                                                            scope=file['scope'],
                                                            name=file['name'],
                                                            rule=rule,
                                                            lock=new_lock,
                                                            bytes_=file['bytes'],
                                                            md5=file['md5'],
                                                            adler32=file['adler32'],
                                                            ds_scope=dataset['scope'],
                                                            ds_name=dataset['name'],
                                                            session=session))
            return True
        return False
1147
+
1148
+
1149
def __create_lock(rule, rse_id, scope, name, bytes_, state, existing_replica, logger=logging.log):
    """
    Create and return a new SQLAlchemy Lock object.

    Also maintains the bookkeeping that goes with a new lock: increments the
    replica's lock count, clears its tombstone (a locked replica must not be
    deleted) and increments the matching per-state counter on the rule.

    :param rule: The SQLAlchemy rule object.
    :param rse_id: The rse_id of the lock.
    :param scope: The scope of the lock.
    :param name: The name of the lock.
    :param bytes_: Bytes of the lock.
    :param state: State of the lock.
    :param existing_replica: Replica object.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: The new (not yet persisted) models.ReplicaLock object.
    """

    new_lock = models.ReplicaLock(rule_id=rule.id,
                                  rse_id=rse_id,
                                  scope=scope,
                                  name=name,
                                  account=rule.account,
                                  bytes=bytes_,
                                  state=state)
    if state == LockState.OK:
        existing_replica.lock_cnt += 1
        existing_replica.tombstone = None
        rule.locks_ok_cnt += 1
        logger(logging.DEBUG, 'Creating OK Lock %s:%s on %s for rule %s', scope, name, rse_id, str(rule.id))
    elif state == LockState.REPLICATING:
        existing_replica.state = ReplicaState.COPYING
        existing_replica.lock_cnt += 1
        existing_replica.tombstone = None
        rule.locks_replicating_cnt += 1
        # Fixed log-argument order: was (scope, rse_id, name), which printed
        # the RSE id in the name position, inconsistent with the OK/STUCK branches
        logger(logging.DEBUG, 'Creating REPLICATING Lock %s:%s on %s for rule %s', scope, name, rse_id, str(rule.id))
    elif state == LockState.STUCK:
        existing_replica.lock_cnt += 1
        existing_replica.tombstone = None
        rule.locks_stuck_cnt += 1
        logger(logging.DEBUG, 'Creating STUCK Lock %s:%s on %s for rule %s', scope, name, rse_id, str(rule.id))
    return new_lock
1187
+
1188
+
1189
def __create_replica(rse_id, scope, name, bytes_, state, md5, adler32, logger=logging.log):
    """
    Build a fresh SQLAlchemy replica object (not yet persisted).

    The replica starts without any locks (lock_cnt=0) and without a
    tombstone; lock bookkeeping is done separately by __create_lock.

    :param rse_id: RSE id of the replica.
    :param scope: Scope of the replica.
    :param name: Name of the replica.
    :param bytes_: Bytes of the replica.
    :param state: State of the replica.
    :param md5: MD5 checksum of the replica.
    :param adler32: ADLER32 checksum of the replica.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: The new models.RSEFileAssociation object.
    """

    replica = models.RSEFileAssociation(
        rse_id=rse_id,
        scope=scope,
        name=name,
        bytes=bytes_,
        md5=md5,
        adler32=adler32,
        tombstone=None,
        state=state,
        lock_cnt=0,
    )
    logger(logging.DEBUG, 'Creating %s replica for %s:%s on %s', state, scope, name, rse_id)
    return replica
1214
+
1215
+
1216
@transactional_session
def __update_lock_replica_and_create_transfer(lock, replica, rule, dataset, transfers_to_create, *, session: "Session", logger=logging.log):
    """
    Move a STUCK lock back to REPLICATING and queue a new transfer for it.

    :param lock: The lock to update.
    :param replica: The replica to update.
    :param rule: Rule to update.
    :param dataset: Dataset dictionary holding the dataset information.
    :param transfers_to_create: List of transfers to create.
    :param session: The db session in use.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :attention: This method modifies the contents of the transfers_to_create input parameters.
    """

    logger(logging.DEBUG, 'Updating Lock %s:%s for rule %s', lock.scope, lock.name, str(rule.id))
    lock.state = LockState.REPLICATING
    rule.locks_stuck_cnt -= 1
    rule.locks_replicating_cnt += 1
    replica.state = ReplicaState.COPYING

    # Track how often this lock had to be repaired (repair_cnt may be NULL)
    lock.repair_cnt = (lock.repair_cnt or 0) + 1

    # Parameters shared by both transfer variants
    transfer_kwargs = {
        'dest_rse_id': lock.rse_id,
        'scope': lock.scope,
        'name': lock.name,
        'rule': rule,
        'lock': lock,
        'bytes_': replica.bytes,
        'md5': replica.md5,
        'adler32': replica.adler32,
        'ds_scope': dataset['scope'],
        'ds_name': dataset['name'],
    }

    if get_rse(rse_id=lock.rse_id, session=session)['staging_area']:
        # Staging area: issue a STAGEIN pinned until the rule expires
        remaining = rule.expires_at - datetime.utcnow()
        transfer_kwargs['request_type'] = RequestType.STAGEIN
        transfer_kwargs['copy_pin_lifetime'] = remaining.seconds + remaining.days * 24 * 3600
    else:
        # Regular RSE: issue a TRANSFER flagged as a retry
        transfer_kwargs['request_type'] = RequestType.TRANSFER
        transfer_kwargs['retry_count'] = 1

    transfers_to_create.append(create_transfer_dict(session=session, **transfer_kwargs))
1272
+
1273
+
1274
@transactional_session
def __set_replica_unavailable(replica, *, session: "Session"):
    """
    Mark a replica as UNAVAILABLE, releasing one lock reference.

    Once no locks remain, the replica is tombstoned (OBSOLETE) so the
    reaper may delete it.

    :param replica: The replica to update.
    :param session: The db session in use.
    """

    replica.lock_cnt -= 1
    if replica.lock_cnt == 0:
        # Nothing holds the replica anymore: make it eligible for deletion
        replica.tombstone = OBSOLETE
    replica.state = ReplicaState.UNAVAILABLE
1287
+
1288
+
1289
+ # # debug helper functions used in apply_rule
1290
+ #
1291
+ # def prnt(x, header=None):
1292
+ # print()
1293
+ # if header:
1294
+ # print(header)
1295
+ # if isinstance(x, list) and len(x):
1296
+ # for elem in x:
1297
+ # print(' ', elem)
1298
+ # elif isinstance(x, dict) and len(x) and isinstance(x.values()[0], list):
1299
+ # for k, v in x.items():
1300
+ # if isinstance(v,list) and len(v):
1301
+ # print(' ', k, ':')
1302
+ # for elem in v:
1303
+ # print(' ', elem)
1304
+ # else:
1305
+ # print(' ', k, ':', v)
1306
+ # else:
1307
+ # print(x)
1308
+ #
1309
+ # import os
1310
+ # def mem():
1311
+ # # start your debug python session with harmless -R option to easily grep it out
1312
+ # os.system("ps -U root -o pid,user,rss:10,vsz:10,args:100 | grep 'python -R' | grep -v bin | grep -v grep")
1313
+
1314
+
1315
+ @transactional_session
1316
+ def apply_rule(did, rule, rses, source_rses, rseselector, *, session: "Session", logger=logging.log):
1317
+ """
1318
+ Apply a replication rule to one did.
1319
+
1320
+ :param did: the did object
1321
+ :param rule: the rule object
1322
+ :param rses: target rses_ids
1323
+ :param source_rses: source rses_ids
1324
+ :param rseselector: the rseselector object
1325
+ :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
1326
+ :param session: the database session in use
1327
+ """
1328
+
1329
+ max_partition_size = config_get_int('rules', 'apply_rule_max_partition_size', default=2000, session=session) # process dataset files in bunches of max this size
1330
+
1331
+ # accounting counters
1332
+ rse_counters_files = {}
1333
+ rse_counters_bytes = {}
1334
+ account_counters_files = {}
1335
+ account_counters_bytes = {}
1336
+
1337
+ if did.did_type == DIDType.FILE:
1338
+ # NOTE: silently ignore rule.grouping
1339
+ if True: # instead of -> if rule.grouping == RuleGrouping.NONE:
1340
+ locks = {} # {(scope,name): [SQLAlchemy]}
1341
+ replicas = {} # {(scope, name): [SQLAlchemy]}
1342
+ source_replicas = {} # {(scope, name): [rse_id]
1343
+ # get files and replicas, lock the replicas
1344
+ replicas[(did.scope, did.name)] = rucio.core.replica.get_and_lock_file_replicas(scope=did.scope, name=did.name, nowait=True, restrict_rses=rses,
1345
+ session=session)
1346
+ # prnt(did, 'file')
1347
+ # prnt(replicas, 'replicas')
1348
+
1349
+ # get and lock the locks
1350
+ locks[(did.scope, did.name)] = rucio.core.lock.get_replica_locks(scope=did.scope, name=did.name, nowait=True, restrict_rses=rses,
1351
+ session=session)
1352
+ # prnt(locks, 'locks')
1353
+
1354
+ # if needed get source replicas
1355
+ if source_rses:
1356
+ source_replicas[(did.scope, did.name)] = rucio.core.replica.get_source_replicas(scope=did.scope, name=did.name, source_rses=source_rses,
1357
+ session=session)
1358
+ else:
1359
+ source_replicas = {}
1360
+ # prnt(source_replicas, 'source_replicas')
1361
+
1362
+ # to align code with cases below, create file dict
1363
+ file = {'name': did.name, 'scope': did.scope,
1364
+ 'bytes': did.bytes, 'md5': did.md5, 'adler32': did.adler32}
1365
+
1366
+ # calculate target RSEs
1367
+ rse_coverage = {replica.rse_id: file['bytes'] for replica in replicas[(file['scope'], file['name'])]}
1368
+ # prnt(rse_coverage)
1369
+ preferred_rse_ids = rse_coverage.keys()
1370
+ # prnt(preferred_rse_ids)
1371
+ rse_tuples = rseselector.select_rse(size=file['bytes'], preferred_rse_ids=preferred_rse_ids,
1372
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1373
+ # prnt(rse_tuples)
1374
+
1375
+ # initialize accumulators for __create_lock_and_replica calls
1376
+ locks_to_create = {} # {'rse_id': [locks]}
1377
+ replicas_to_create = {} # {'rse_id': [replicas]}
1378
+ transfers_to_create = [] # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
1379
+
1380
+ for rse_id, staging_area, availability_write in rse_tuples:
1381
+ # check for bug ????
1382
+ if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_id]) == 1:
1383
+ logger(logging.DEBUG, '>>> WARNING unexpected duplicate lock for file %s at RSE %s' % (file, rse_id))
1384
+ continue
1385
+ # proceed
1386
+ __create_lock_and_replica(file=file, dataset={'scope': None, 'name': None}, rule=rule,
1387
+ rse_id=rse_id, staging_area=staging_area, availability_write=availability_write, source_rses=source_rses,
1388
+ replicas=replicas, locks=locks, source_replicas=source_replicas,
1389
+ locks_to_create=locks_to_create, replicas_to_create=replicas_to_create, transfers_to_create=transfers_to_create,
1390
+ session=session)
1391
+
1392
+ # prnt(locks_to_create, 'locks_to_create')
1393
+ # prnt(replicas_to_create, 'replicas_to_create')
1394
+ # prnt(transfers_to_create, 'transfers_to_create')
1395
+
1396
+ # flush to DB
1397
+ session.add_all([item for sublist in replicas_to_create.values() for item in sublist])
1398
+ session.add_all([item for sublist in locks_to_create.values() for item in sublist])
1399
+ request_core.queue_requests(requests=transfers_to_create, session=session)
1400
+ session.flush()
1401
+
1402
+ # increment counters
1403
+ # align code with the one used inside the file loop below
1404
+ for rse_id in replicas_to_create.keys():
1405
+ rse_counters_files[rse_id] = len(replicas_to_create[rse_id]) + rse_counters_files.get(rse_id, 0)
1406
+ rse_counters_bytes[rse_id] = sum([replica.bytes for replica in replicas_to_create[rse_id]]) + rse_counters_bytes.get(rse_id, 0)
1407
+ # prnt(rse_counters_files, 'rse_counters_files')
1408
+ # prnt(rse_counters_bytes, 'rse_counters_bytes')
1409
+
1410
+ for rse_id in locks_to_create.keys():
1411
+ account_counters_files[rse_id] = len(locks_to_create[rse_id]) + account_counters_files.get(rse_id, 0)
1412
+ account_counters_bytes[rse_id] = sum([lock.bytes for lock in locks_to_create[rse_id]]) + account_counters_bytes.get(rse_id, 0)
1413
+ # prnt(account_counters_files, 'account_counters_files')
1414
+ # prnt(account_counters_bytes, 'account_counters_bytes')
1415
+
1416
+ else:
1417
+ # handle dataset case by converting it to singleton container case
1418
+ # NOTE: this will handle DATASET/ALL as if it was DATASET/DATASET
1419
+ datasets = [] # [(scope,name)]
1420
+ if did.did_type == DIDType.DATASET:
1421
+ datasets.append((did.scope, did.name, ))
1422
+ elif did.did_type == DIDType.CONTAINER:
1423
+ for child_dataset in rucio.core.did.list_child_datasets(scope=did.scope, name=did.name, session=session):
1424
+ # ensure there are no duplicates
1425
+ newds = (child_dataset['scope'], child_dataset['name'], )
1426
+ if newds not in datasets:
1427
+ datasets.append(newds)
1428
+ # sort alphabetically for deterministic order
1429
+ try:
1430
+ datasets = sorted(datasets)
1431
+ except Exception:
1432
+ pass
1433
+
1434
+ # prnt(datasets)
1435
+
1436
+ rse_coverage = {} # rse_coverage = { rse_id : bytes }
1437
+ rse_tuples = [] # rse_tuples = [(rse_id, staging_area, availability_write)]
1438
+ used_rse_ids = [] # for NONE grouping keep track of actual used RSEs
1439
+
1440
+ if rule.grouping == RuleGrouping.ALL:
1441
+ # calculate target RSEs
1442
+ nbytes = 0
1443
+ rse_coverage = {}
1444
+ # simply loop over child datasets
1445
+ # this is an approximation because ignoring the possibility of file overlap
1446
+ for ds_scope, ds_name in datasets:
1447
+ ds = rucio.core.did.get_did(scope=ds_scope, name=ds_name, dynamic_depth=DIDType.FILE, session=session) # this will be retrieved again later on -> could be optimized
1448
+ nbytes += ds['bytes']
1449
+ one_rse_coverage = rucio.core.replica.get_RSEcoverage_of_dataset(scope=ds_scope, name=ds_name, session=session)
1450
+ for rse_id, bytes_ in one_rse_coverage.items():
1451
+ rse_coverage[rse_id] = bytes_ + rse_coverage.get(rse_id, 0)
1452
+
1453
+ # prnt(rse_coverage)
1454
+ preferred_rse_ids = [x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)]
1455
+ # prnt(preferred_rse_ids)
1456
+ rse_tuples = rseselector.select_rse(size=nbytes, preferred_rse_ids=preferred_rse_ids,
1457
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1458
+ # prnt(rse_tuples)
1459
+
1460
+ for ds_scope, ds_name in datasets:
1461
+ # prnt(('processing dataset ',ds_scope, ds_name))
1462
+ #
1463
+ ds = rucio.core.did.get_did(scope=ds_scope, name=ds_name, dynamic_depth=DIDType.FILE, session=session)
1464
+ ds_length = ds['length']
1465
+ ds_bytes = ds['bytes']
1466
+ ds_open = ds['open']
1467
+ # prnt(ds)
1468
+
1469
+ # calculate number of partitions based on nr of files
1470
+ npartitions = int(ds_length / max_partition_size) + 1
1471
+ # prnt(npartitions)
1472
+
1473
+ if rule.grouping == RuleGrouping.DATASET:
1474
+ # calculate target RSEs
1475
+ rse_coverage = rucio.core.replica.get_RSEcoverage_of_dataset(scope=ds_scope, name=ds_name, session=session)
1476
+ # prnt(rse_coverage)
1477
+ preferred_rse_ids = [x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)]
1478
+ # prnt(preferred_rse_ids)
1479
+ rse_tuples = rseselector.select_rse(size=ds_bytes, preferred_rse_ids=preferred_rse_ids,
1480
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1481
+ # prnt(rse_tuples)
1482
+
1483
+ # loop over the partitions even if it is just one
1484
+ for p in range(npartitions):
1485
+ # prnt(('processing partition ', p, npartitions))
1486
+
1487
+ # files is [{'scope':, 'name':, 'bytes':, 'md5':, 'adler32':}]
1488
+ # locks is {(scope,name): [SQLAlchemy]}
1489
+ # replicas = {(scope, name): [SQLAlchemy]}
1490
+ # source replicas is {(scope, name): [SQLAlchemy]}
1491
+
1492
+ # get files and replicas, lock the replicas
1493
+ files, replicas = rucio.core.replica.get_and_lock_file_replicas_for_dataset(scope=ds_scope, name=ds_name, nowait=True, restrict_rses=rses,
1494
+ total_threads=npartitions, thread_id=p, session=session)
1495
+ # prnt(files, 'files')
1496
+ # prnt(replicas, 'replicas')
1497
+
1498
+ # get and lock the replica locks
1499
+ locks = rucio.core.lock.get_files_and_replica_locks_of_dataset(scope=ds_scope, name=ds_name, nowait=True, restrict_rses=rses,
1500
+ total_threads=npartitions, thread_id=p, session=session)
1501
+ # prnt(locks, 'locks')
1502
+
1503
+ # if needed get source replicas
1504
+ if source_rses:
1505
+ source_replicas = rucio.core.replica.get_source_replicas_for_dataset(scope=ds_scope, name=ds_name, source_rses=source_rses,
1506
+ total_threads=npartitions, thread_id=p, session=session)
1507
+ else:
1508
+ source_replicas = {}
1509
+ # prnt(source_replicas, 'source_replicas')
1510
+
1511
+ # initialize accumulators for __create_lock_and_replica calls
1512
+ locks_to_create = {} # {'rse_id': [locks]}
1513
+ replicas_to_create = {} # {'rse_id': [replicas]}
1514
+ transfers_to_create = [] # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
1515
+
1516
+ # loop over the rse tuples
1517
+ for file in files:
1518
+ # check for duplicate due to dataset overlap within container
1519
+ if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
1520
+ logger(logging.DEBUG, '>>> WARNING skipping (shared?) file %s' % file)
1521
+ continue
1522
+
1523
+ if rule.grouping == RuleGrouping.NONE:
1524
+ # calculate target RSEs
1525
+ rse_coverage = {replica.rse_id: file['bytes'] for replica in replicas[(file['scope'], file['name'])]}
1526
+ # prnt(rse_coverage)
1527
+ preferred_rse_ids = rse_coverage.keys()
1528
+ # prnt(preferred_rse_ids)
1529
+ rse_tuples = rseselector.select_rse(size=file['bytes'], preferred_rse_ids=preferred_rse_ids,
1530
+ prioritize_order_over_weight=True, existing_rse_size=rse_coverage)
1531
+ # prnt(rse_tuples)
1532
+ # keep track of used RSEs
1533
+ for rt in rse_tuples:
1534
+ if not rt[0] in used_rse_ids:
1535
+ used_rse_ids.append(rt[0])
1536
+
1537
+ for rse_id, staging_area, availability_write in rse_tuples:
1538
+ # check for bug ????
1539
+ if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_id]) == 1:
1540
+ logger(logging.DEBUG, '>>> WARNING unexpected duplicate lock for file %s at RSE %s' % (file, rse_id))
1541
+ continue
1542
+ # proceed
1543
+ __create_lock_and_replica(file=file, dataset={'scope': ds_scope, 'name': ds_name}, rule=rule,
1544
+ rse_id=rse_id, staging_area=staging_area, availability_write=availability_write, source_rses=source_rses,
1545
+ replicas=replicas, locks=locks, source_replicas=source_replicas,
1546
+ locks_to_create=locks_to_create, replicas_to_create=replicas_to_create, transfers_to_create=transfers_to_create,
1547
+ session=session)
1548
+
1549
+ # prnt(locks_to_create, 'locks_to_create')
1550
+ # prnt(replicas_to_create, 'replicas_to_create')
1551
+ # prnt(transfers_to_create, 'transfers_to_create')
1552
+
1553
+ # flush to DB
1554
+ session.add_all([item for sublist in replicas_to_create.values() for item in sublist])
1555
+ session.add_all([item for sublist in locks_to_create.values() for item in sublist])
1556
+ request_core.queue_requests(requests=transfers_to_create, session=session)
1557
+ session.flush()
1558
+
1559
+ # increment counters
1560
+ # do not update (and lock !) counters inside loop here, update at very end and only once
1561
+ for rse_id in replicas_to_create.keys():
1562
+ rse_counters_files[rse_id] = len(replicas_to_create[rse_id]) + rse_counters_files.get(rse_id, 0)
1563
+ rse_counters_bytes[rse_id] = sum([replica.bytes for replica in replicas_to_create[rse_id]]) + rse_counters_bytes.get(rse_id, 0)
1564
+ # prnt(rse_counters_files, 'rse_counters_files')
1565
+ # prnt(rse_counters_bytes, 'rse_counters_bytes')
1566
+
1567
+ for rse_id in locks_to_create.keys():
1568
+ account_counters_files[rse_id] = len(locks_to_create[rse_id]) + account_counters_files.get(rse_id, 0)
1569
+ account_counters_bytes[rse_id] = sum([lock.bytes for lock in locks_to_create[rse_id]]) + account_counters_bytes.get(rse_id, 0)
1570
+ # prnt(account_counters_files, 'account_counters_files')
1571
+ # prnt(account_counters_bytes, 'account_counters_bytes')
1572
+
1573
+ # mem()
1574
+
1575
+ # dataset lock/replica
1576
+ u_rses = (used_rse_ids if rule.grouping == RuleGrouping.NONE else [x[0] for x in rse_tuples])
1577
+ # prnt(u_rses, 'used RSE ids')
1578
+ for u_rse in u_rses:
1579
+ # prnt('creating dataset lock/replica for %s on %s' % (ds_name,u_rse))
1580
+ if rule.grouping == RuleGrouping.DATASET or rule.grouping == RuleGrouping.ALL:
1581
+ # add dataset lock
1582
+ models.DatasetLock(scope=ds_scope, name=ds_name,
1583
+ rule_id=rule.id,
1584
+ rse_id=u_rse,
1585
+ state=LockState.REPLICATING,
1586
+ account=rule.account,
1587
+ length=ds_length if not ds_open else None,
1588
+ bytes=ds_bytes if not ds_open else None
1589
+ ).save(session=session)
1590
+
1591
+ # add dataset replica if not already existing (rule_id is not in PK)
1592
+ try:
1593
+ stmt = select(
1594
+ models.CollectionReplica
1595
+ ).where(
1596
+ and_(models.CollectionReplica.scope == ds_scope,
1597
+ models.CollectionReplica.name == ds_name,
1598
+ models.CollectionReplica.rse_id == u_rse)
1599
+ )
1600
+ session.execute(stmt).one()
1601
+ except NoResultFound:
1602
+ models.CollectionReplica(scope=ds_scope, name=ds_name, did_type=DIDType.DATASET,
1603
+ rse_id=u_rse,
1604
+ bytes=0, length=0, available_bytes=0, available_replicas_cnt=0,
1605
+ state=ReplicaState.UNAVAILABLE
1606
+ ).save(session=session)
1607
+
1608
+ models.UpdatedCollectionReplica(scope=ds_scope, name=ds_name, did_type=DIDType.DATASET
1609
+ ).save(session=session)
1610
+
1611
+ # update account and rse counters
1612
+ for rse_id in rse_counters_files:
1613
+ rse_counter.increase(rse_id=rse_id, files=rse_counters_files[rse_id], bytes_=rse_counters_bytes[rse_id], session=session)
1614
+ for rse_id in account_counters_files:
1615
+ account_counter.increase(rse_id=rse_id, account=rule.account, files=account_counters_files[rse_id], bytes_=account_counters_bytes[rse_id], session=session)
1616
+ session.flush()
1617
+
1618
+ return