morango 0.6.11__py2.py3-none-any.whl → 0.8.6__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. morango/__init__.py +1 -6
  2. morango/api/serializers.py +3 -0
  3. morango/api/viewsets.py +38 -23
  4. morango/apps.py +1 -2
  5. morango/constants/settings.py +3 -0
  6. morango/constants/transfer_stages.py +1 -1
  7. morango/constants/transfer_statuses.py +1 -1
  8. morango/errors.py +4 -0
  9. morango/management/commands/cleanupsyncs.py +64 -14
  10. morango/migrations/0001_initial.py +0 -2
  11. morango/migrations/0001_squashed_0024_auto_20240129_1757.py +583 -0
  12. morango/migrations/0002_auto_20170511_0400.py +0 -2
  13. morango/migrations/0002_store_idx_morango_deserialize.py +21 -0
  14. morango/migrations/0003_auto_20170519_0543.py +0 -2
  15. morango/migrations/0004_auto_20170520_2112.py +0 -2
  16. morango/migrations/0005_auto_20170629_2139.py +0 -2
  17. morango/migrations/0006_instanceidmodel_system_id.py +0 -2
  18. morango/migrations/0007_auto_20171018_1615.py +0 -2
  19. morango/migrations/0008_auto_20171114_2217.py +0 -2
  20. morango/migrations/0009_auto_20171205_0252.py +0 -2
  21. morango/migrations/0010_auto_20171206_1615.py +0 -2
  22. morango/migrations/0011_sharedkey.py +0 -2
  23. morango/migrations/0012_auto_20180927_1658.py +0 -2
  24. morango/migrations/0013_auto_20190627_1513.py +0 -2
  25. morango/migrations/0014_syncsession_extra_fields.py +0 -2
  26. morango/migrations/0015_auto_20200508_2104.py +2 -3
  27. morango/migrations/0016_store_deserialization_error.py +2 -3
  28. morango/migrations/0017_store_last_transfer_session_id.py +1 -2
  29. morango/migrations/0018_auto_20210714_2216.py +2 -3
  30. morango/migrations/0019_auto_20220113_1807.py +2 -3
  31. morango/migrations/0020_postgres_fix_nullable.py +0 -2
  32. morango/migrations/0021_store_partition_index_create.py +0 -2
  33. morango/migrations/0022_rename_instance_fields.py +23 -0
  34. morango/migrations/0023_add_instance_id_fields.py +24 -0
  35. morango/migrations/0024_auto_20240129_1757.py +28 -0
  36. morango/models/__init__.py +0 -6
  37. morango/models/certificates.py +137 -28
  38. morango/models/core.py +48 -46
  39. morango/models/fields/crypto.py +20 -6
  40. morango/models/fields/uuids.py +2 -1
  41. morango/models/utils.py +5 -6
  42. morango/proquint.py +2 -3
  43. morango/registry.py +28 -49
  44. morango/sync/backends/base.py +34 -0
  45. morango/sync/backends/postgres.py +129 -0
  46. morango/sync/backends/utils.py +10 -11
  47. morango/sync/context.py +198 -13
  48. morango/sync/controller.py +33 -11
  49. morango/sync/operations.py +324 -251
  50. morango/sync/session.py +11 -0
  51. morango/sync/syncsession.py +78 -85
  52. morango/sync/utils.py +18 -0
  53. morango/urls.py +3 -3
  54. morango/utils.py +2 -3
  55. {morango-0.6.11.dist-info → morango-0.8.6.dist-info}/METADATA +29 -14
  56. morango-0.8.6.dist-info/RECORD +79 -0
  57. {morango-0.6.11.dist-info → morango-0.8.6.dist-info}/WHEEL +1 -1
  58. morango/models/morango_mptt.py +0 -33
  59. morango/settings.py +0 -115
  60. morango/wsgi.py +0 -33
  61. morango-0.6.11.dist-info/RECORD +0 -77
  62. {morango-0.6.11.dist-info → morango-0.8.6.dist-info/licenses}/AUTHORS.md +0 -0
  63. {morango-0.6.11.dist-info → morango-0.8.6.dist-info/licenses}/LICENSE +0 -0
  64. {morango-0.6.11.dist-info → morango-0.8.6.dist-info}/top_level.txt +0 -0
morango/sync/operations.py
@@ -4,16 +4,16 @@ import json
  import logging
  import uuid
  from collections import defaultdict
+ from contextlib import contextmanager

  from django.core import exceptions
  from django.core.serializers.json import DjangoJSONEncoder
  from django.db import connection
- from django.db import connections
- from django.db import router
  from django.db import transaction
  from django.db.models import CharField
  from django.db.models import Q
  from django.db.models import signals
+ from django.db.utils import OperationalError
  from django.utils import timezone
  from rest_framework.exceptions import ValidationError

@@ -22,6 +22,7 @@ from morango.constants import transfer_stages
  from morango.constants import transfer_statuses
  from morango.constants.capabilities import ASYNC_OPERATIONS
  from morango.constants.capabilities import FSIC_V2_FORMAT
+ from morango.errors import MorangoDatabaseError
  from morango.errors import MorangoInvalidFSICPartition
  from morango.errors import MorangoLimitExceeded
  from morango.errors import MorangoResumeSyncError
@@ -46,6 +47,7 @@ from morango.sync.backends.utils import load_backend
  from morango.sync.backends.utils import TemporaryTable
  from morango.sync.context import LocalSessionContext
  from morango.sync.context import NetworkSessionContext
+ from morango.sync.utils import lock_partitions
  from morango.sync.utils import mute_signals
  from morango.sync.utils import validate_and_create_buffer_data
  from morango.utils import _assert
@@ -56,17 +58,6 @@ logger = logging.getLogger(__name__)

  DBBackend = load_backend(connection)

- # if postgres, get serializable db connection
- db_name = router.db_for_write(Store)
- USING_DB = db_name
- if "postgresql" in transaction.get_connection(USING_DB).vendor:
-     USING_DB = db_name + "-serializable"
-     _assert(
-         USING_DB in connections,
-         "Please add a `default-serializable` database connection in your django settings file, \
- which copies all the configuration settings of the `default` db connection",
-     )
-
  SQL_UNION_MAX = 500

 
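Note: the block removed above is the 0.6.x requirement that PostgreSQL deployments register a second, serializable connection alias mirroring `default`. A minimal sketch of the settings that assertion used to demand is below; the alias name comes from the removed error message, the engine and credentials are placeholders, and none of this is needed as of 0.8.x:

    # settings.py sketch for morango <= 0.6.x on PostgreSQL (obsolete after this diff)
    DATABASES = {
        "default": {
            "ENGINE": "django.db.backends.postgresql",
            "NAME": "mydb",  # placeholder credentials
        },
        "default-serializable": {
            # had to copy all configuration settings of the `default` connection;
            # morango routed its serialize/deserialize transactions through it
            "ENGINE": "django.db.backends.postgresql",
            "NAME": "mydb",
        },
    }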
@@ -101,6 +92,32 @@ def _self_referential_fk(model):
      return None


+ @contextmanager
+ def _begin_transaction(sync_filter, isolated=False, shared_lock=False):
+     """
+     Starts a transaction, sets the transaction isolation level to repeatable read, and locks
+     affected partitions
+
+     :param sync_filter: The filter for filtering applicable records of the sync
+     :type sync_filter: morango.models.certificates.Filter|None
+     :param isolated: Whether to alter the transaction isolation to repeatable-read
+     :type isolated: bool
+     :param shared_lock: Whether the advisory lock should be exclusive or shared
+     :type shared_lock: bool
+     """
+     if isolated:
+         # when isolation is requested, we modify the transaction isolation of the connection for the
+         # duration of the transaction
+         with DBBackend._set_transaction_repeatable_read():
+             with transaction.atomic(savepoint=False):
+                 lock_partitions(DBBackend, sync_filter=sync_filter, shared=shared_lock)
+                 yield
+     else:
+         with transaction.atomic():
+             lock_partitions(DBBackend, sync_filter=sync_filter, shared=shared_lock)
+             yield
+
+
  def _serialize_into_store(profile, filter=None):
      """
      Takes data from app layer and serializes the models into the store.
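Note: the hunks below swap the old `using=USING_DB` transactions for this context manager. As a quick orientation (not part of the diff), the two modes map onto the operations like so, where `f` stands in for the `morango.models.certificates.Filter` in use:

    # Orientation sketch only; `f` is the sync filter being processed.

    # Serialization/deserialization runs under repeatable-read isolation plus
    # an exclusive advisory lock on the affected partitions:
    with _begin_transaction(f, isolated=True):
        pass  # read-modify-write of Store / RecordMaxCounter rows

    # Queueing into the transfer buffer only reads the store, so it takes a
    # shared lock and keeps the connection's default isolation level:
    with _begin_transaction(f, shared_lock=True):
        pass  # SELECT from store, INSERT into buffer

    # Dequeueing back into the store takes the default exclusive lock:
    with _begin_transaction(f):
        pass  # merge buffers into Store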
@@ -115,7 +132,7 @@ def _serialize_into_store(profile, filter=None):
      # ensure that we write and retrieve the counter in one go for consistency
      current_id = InstanceIDModel.get_current_instance_and_increment_counter()

-     with transaction.atomic(using=USING_DB):
+     with _begin_transaction(filter, isolated=True):
          # create Q objects for filtering by prefixes
          prefix_condition = None
          if filter:
@@ -128,7 +145,7 @@ def _serialize_into_store(profile, filter=None):
          for model in syncable_models.get_models(profile):
              new_store_records = []
              new_rmc_records = []
-             klass_queryset = model.objects.filter(_morango_dirty_bit=True)
+             klass_queryset = model.syncing_objects.filter(_morango_dirty_bit=True)
              if prefix_condition:
                  klass_queryset = klass_queryset.filter(prefix_condition)
              store_records_dict = Store.objects.in_bulk(
@@ -430,7 +447,7 @@ def _deserialize_from_store(profile, skip_erroring=False, filter=None):
      excluded_list = []
      deleted_list = []

-     with transaction.atomic(using=USING_DB):
+     with _begin_transaction(filter, isolated=True):
          # iterate through classes which are in foreign key dependency order
          for model in syncable_models.get_models(profile):
              deferred_fks = defaultdict(list)
@@ -590,7 +607,6 @@ def _deserialize_from_store(profile, skip_erroring=False, filter=None):
          ).update(dirty_bit=False)


- @transaction.atomic(using=USING_DB)
  def _queue_into_buffer_v1(transfersession):
      """
      Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance. This is the legacy
@@ -601,117 +617,122 @@ def _queue_into_buffer_v1(transfersession):
      as well as the partition for the data we are syncing.
      """
      filter_prefixes = Filter(transfersession.filter)
-     server_fsic = json.loads(transfersession.server_fsic)
-     client_fsic = json.loads(transfersession.client_fsic)
+     with _begin_transaction(filter_prefixes, shared_lock=True):
+         server_fsic = json.loads(transfersession.server_fsic)
+         client_fsic = json.loads(transfersession.client_fsic)

-     if transfersession.push:
-         fsics = calculate_directional_fsic_diff(client_fsic, server_fsic)
-     else:
-         fsics = calculate_directional_fsic_diff(server_fsic, client_fsic)
-
-     # if fsics are identical or receiving end has newer data, then there is nothing to queue
-     if not fsics:
-         return
-
-     profile_condition = ["profile = '{}'".format(transfersession.sync_session.profile)]
-     partition_conditions = []
-     # create condition for filtering by partitions
-     for prefix in filter_prefixes:
-         partition_conditions += ["partition LIKE '{}%'".format(prefix)]
-     if filter_prefixes:
-         partition_conditions = [_join_with_logical_operator(partition_conditions, "OR")]
-
-     chunk_size = 200
-     fsics = list(fsics.items())
-     fsics_len = len(fsics)
-     fsics_limit = chunk_size * SQL_UNION_MAX
-
-     if fsics_len >= fsics_limit:
-         raise MorangoLimitExceeded(
-             "Limit of {limit} instance counters exceeded with {actual}".format(
-                 limit=fsics_limit, actual=fsics_len
-             )
-         )
+         if transfersession.push:
+             fsics = calculate_directional_fsic_diff(client_fsic, server_fsic)
+         else:
+             fsics = calculate_directional_fsic_diff(server_fsic, client_fsic)

-     # chunk fsics creating multiple SQL selects which will be unioned before insert
-     i = 0
-     chunk = fsics[:chunk_size]
-     select_buffers = []
-
-     while chunk:
-         # create condition for all push FSICs where instance_ids are equal, but internal counters are higher than
-         # FSICs counters
-         last_saved_by_conditions = [
-             "(last_saved_instance = '{0}' AND last_saved_counter > {1})".format(
-                 instance, counter
-             )
-             for instance, counter in chunk
+         # if fsics are identical or receiving end has newer data, then there is nothing to queue
+         if not fsics:
+             return
+
+         profile_condition = [
+             "profile = '{}'".format(transfersession.sync_session.profile)
          ]
-         if last_saved_by_conditions:
-             last_saved_by_conditions = [
-                 _join_with_logical_operator(last_saved_by_conditions, "OR")
+         partition_conditions = []
+         # create condition for filtering by partitions
+         for prefix in filter_prefixes:
+             partition_conditions += ["partition LIKE '{}%'".format(prefix)]
+         if filter_prefixes:
+             partition_conditions = [
+                 _join_with_logical_operator(partition_conditions, "OR")
              ]

-         # combine conditions and filter by profile
-         where_condition = _join_with_logical_operator(
-             profile_condition + last_saved_by_conditions + partition_conditions, "AND"
-         )
+         chunk_size = 200
+         fsics = list(fsics.items())
+         fsics_len = len(fsics)
+         fsics_limit = chunk_size * SQL_UNION_MAX

-         # execute raw sql to take all records that match condition, to be put into buffer for transfer
-         select_buffers.append(
-             """SELECT
-             id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile,
-             partition, source_id, conflicting_serialized_data,
-             CAST ('{transfer_session_id}' AS {transfer_session_id_type}), _self_ref_fk
-             FROM {store} WHERE {condition}
-             """.format(
-                 transfer_session_id=transfersession.id,
-                 transfer_session_id_type=TransferSession._meta.pk.rel_db_type(
-                     connection
-                 ),
-                 condition=where_condition,
-                 store=Store._meta.db_table,
+         if fsics_len >= fsics_limit:
+             raise MorangoLimitExceeded(
+                 "Limit of {limit} instance counters exceeded with {actual}".format(
+                     limit=fsics_limit, actual=fsics_len
+                 )
              )
-         )
-         i += chunk_size
-         chunk = fsics[i : i + chunk_size]
-
-     # take all record max counters that are foreign keyed onto store models, which were queued into the buffer
-     select_rmc_buffer_query = """SELECT instance_id, counter, CAST ('{transfer_session_id}' AS {transfer_session_id_type}), store_model_id
-     FROM {record_max_counter} AS rmc
-     INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid
-     WHERE buffer.transfer_session_id = '{transfer_session_id}'
-     """.format(
-         transfer_session_id=transfersession.id,
-         transfer_session_id_type=TransferSession._meta.pk.rel_db_type(connection),
-         record_max_counter=RecordMaxCounter._meta.db_table,
-         outgoing_buffer=Buffer._meta.db_table,
-     )

-     with connection.cursor() as cursor:
-         cursor.execute(
-             """INSERT INTO {outgoing_buffer}
-             (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter,
-             hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data,
-             transfer_session_id, _self_ref_fk)
-             {select}
-             """.format(
-                 outgoing_buffer=Buffer._meta.db_table,
-                 select=" UNION ".join(select_buffers),
+         # chunk fsics creating multiple SQL selects which will be unioned before insert
+         i = 0
+         chunk = fsics[:chunk_size]
+         select_buffers = []
+
+         while chunk:
+             # create condition for all push FSICs where instance_ids are equal, but internal counters are higher than
+             # FSICs counters
+             last_saved_by_conditions = [
+                 "(last_saved_instance = '{0}' AND last_saved_counter > {1})".format(
+                     instance, counter
+                 )
+                 for instance, counter in chunk
+             ]
+             if last_saved_by_conditions:
+                 last_saved_by_conditions = [
+                     _join_with_logical_operator(last_saved_by_conditions, "OR")
+                 ]
+
+             # combine conditions and filter by profile
+             where_condition = _join_with_logical_operator(
+                 profile_condition + last_saved_by_conditions + partition_conditions,
+                 "AND",
              )
-         )
-         cursor.execute(
-             """INSERT INTO {outgoing_rmcb}
-             (instance_id, counter, transfer_session_id, model_uuid)
-             {select}
-             """.format(
-                 outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table,
-                 select=select_rmc_buffer_query,
+
+             # execute raw sql to take all records that match condition, to be put into buffer for transfer
+             select_buffers.append(
+                 """SELECT
+                 id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile,
+                 partition, source_id, conflicting_serialized_data,
+                 CAST ('{transfer_session_id}' AS {transfer_session_id_type}), _self_ref_fk
+                 FROM {store} WHERE {condition}
+                 """.format(
+                     transfer_session_id=transfersession.id,
+                     transfer_session_id_type=TransferSession._meta.pk.rel_db_type(
+                         connection
+                     ),
+                     condition=where_condition,
+                     store=Store._meta.db_table,
+                 )
              )
+             i += chunk_size
+             chunk = fsics[i : i + chunk_size]
+
+         # take all record max counters that are foreign keyed onto store models, which were queued into the buffer
+         select_rmc_buffer_query = """SELECT instance_id, counter, CAST ('{transfer_session_id}' AS {transfer_session_id_type}), store_model_id
+         FROM {record_max_counter} AS rmc
+         INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid
+         WHERE buffer.transfer_session_id = '{transfer_session_id}'
+         """.format(
+             transfer_session_id=transfersession.id,
+             transfer_session_id_type=TransferSession._meta.pk.rel_db_type(connection),
+             record_max_counter=RecordMaxCounter._meta.db_table,
+             outgoing_buffer=Buffer._meta.db_table,
          )

+         with connection.cursor() as cursor:
+             cursor.execute(
+                 """INSERT INTO {outgoing_buffer}
+                 (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter,
+                 hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data,
+                 transfer_session_id, _self_ref_fk)
+                 {select}
+                 """.format(
+                     outgoing_buffer=Buffer._meta.db_table,
+                     select=" UNION ".join(select_buffers),
+                 )
+             )
+             cursor.execute(
+                 """INSERT INTO {outgoing_rmcb}
+                 (instance_id, counter, transfer_session_id, model_uuid)
+                 """ + select_rmc_buffer_query if False else """INSERT INTO {outgoing_rmcb}
+                 (instance_id, counter, transfer_session_id, model_uuid)
+                 {select}
+                 """.format(
+                     outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table,
+                     select=select_rmc_buffer_query,
+                 )
+             )
+

- @transaction.atomic(using=USING_DB)
  def _queue_into_buffer_v2(transfersession, chunk_size=200):
      """
      Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance.
@@ -723,138 +744,142 @@ def _queue_into_buffer_v2(transfersession, chunk_size=200):
      We use raw sql queries to place data in the buffer and the record max counter buffer, which matches the conditions of the FSIC.
      """
      sync_filter = Filter(transfersession.filter)
-     server_fsic = json.loads(transfersession.server_fsic)
-     client_fsic = json.loads(transfersession.client_fsic)
-
-     assert "sub" in server_fsic
-     assert "super" in server_fsic
-     assert "sub" in client_fsic
-     assert "super" in client_fsic
-
-     # ensure that the partitions in the FSICs are under the current filter, before using them
-     for partition in itertools.chain(
-         server_fsic["sub"].keys(), client_fsic["sub"].keys()
-     ):
-         if partition not in sync_filter:
-             raise MorangoInvalidFSICPartition(
-                 "Partition '{}' is not in filter".format(partition)
-             )
+     with _begin_transaction(sync_filter, shared_lock=True):
+         server_fsic = json.loads(transfersession.server_fsic)
+         client_fsic = json.loads(transfersession.client_fsic)
+
+         assert "sub" in server_fsic
+         assert "super" in server_fsic
+         assert "sub" in client_fsic
+         assert "super" in client_fsic
+
+         # ensure that the partitions in the FSICs are under the current filter, before using them
+         for partition in itertools.chain(
+             server_fsic["sub"].keys(), client_fsic["sub"].keys()
+         ):
+             if partition not in sync_filter:
+                 raise MorangoInvalidFSICPartition(
+                     "Partition '{}' is not in filter".format(partition)
+                 )

-     server_fsic = expand_fsic_for_use(server_fsic, sync_filter)
-     client_fsic = expand_fsic_for_use(client_fsic, sync_filter)
+         server_fsic = expand_fsic_for_use(server_fsic, sync_filter)
+         client_fsic = expand_fsic_for_use(client_fsic, sync_filter)

-     if transfersession.push:
-         fsics = calculate_directional_fsic_diff_v2(client_fsic, server_fsic)
-     else:
-         fsics = calculate_directional_fsic_diff_v2(server_fsic, client_fsic)
+         if transfersession.push:
+             fsics = calculate_directional_fsic_diff_v2(client_fsic, server_fsic)
+         else:
+             fsics = calculate_directional_fsic_diff_v2(server_fsic, client_fsic)

-     # if fsics are identical or receiving end has newer data, then there is nothing to queue
-     if not fsics:
-         return
+         # if fsics are identical or receiving end has newer data, then there is nothing to queue
+         if not fsics:
+             return

-     profile_condition = ["profile = '{}'".format(transfersession.sync_session.profile)]
+         profile_condition = [
+             "profile = '{}'".format(transfersession.sync_session.profile)
+         ]

-     fsics_len = sum(len(fsics[part]) for part in fsics) + len(fsics)
-     # subtract one because when partitions overflow chunks they add up to an extra item per chunk
-     fsics_limit = chunk_size * (SQL_UNION_MAX - 1)
+         fsics_len = sum(len(fsics[part]) for part in fsics) + len(fsics)
+         # subtract one because when partitions overflow chunks they add up to an extra item per chunk
+         fsics_limit = chunk_size * (SQL_UNION_MAX - 1)

-     if fsics_len >= fsics_limit:
-         raise MorangoLimitExceeded(
-             "Limit of {limit} instances + partitions exceeded with {actual}".format(
-                 limit=fsics_limit, actual=fsics_len
+         if fsics_len >= fsics_limit:
+             raise MorangoLimitExceeded(
+                 "Limit of {limit} instances + partitions exceeded with {actual}".format(
+                     limit=fsics_limit, actual=fsics_len
+                 )
              )
-         )

-     # if needed, split the fsics into chunks
-     if fsics_len > chunk_size:
-         chunked_fsics = chunk_fsic_v2(fsics, chunk_size)
-     else:
-         chunked_fsics = [fsics]
+         # if needed, split the fsics into chunks
+         if fsics_len > chunk_size:
+             chunked_fsics = chunk_fsic_v2(fsics, chunk_size)
+         else:
+             chunked_fsics = [fsics]

-     select_buffers = []
+         select_buffers = []

-     for fsic_chunk in chunked_fsics:
+         for fsic_chunk in chunked_fsics:

-         # create condition for filtering by partitions
-         partition_conditions = []
-         for part, insts in fsic_chunk.items():
-             if not insts:
-                 continue
-
-             partition_conditions.append(
-                 "partition LIKE '{}%' AND (".format(part)
-                 + _join_with_logical_operator(
-                     [
-                         "(last_saved_instance = '{}' AND last_saved_counter > {})".format(
-                             inst, counter
-                         )
-                         for inst, counter in insts.items()
-                     ],
-                     "OR",
-                 )
-                 + ")"
-             )
+             # create condition for filtering by partitions
+             partition_conditions = []
+             for part, insts in fsic_chunk.items():
+                 if not insts:
+                     continue

-         partition_conditions = [_join_with_logical_operator(partition_conditions, "OR")]
+                 partition_conditions.append(
+                     "partition LIKE '{}%' AND (".format(part)
+                     + _join_with_logical_operator(
+                         [
+                             "(last_saved_instance = '{}' AND last_saved_counter > {})".format(
+                                 inst, counter
+                             )
+                             for inst, counter in insts.items()
+                         ],
+                         "OR",
+                     )
+                     + ")"
+                 )

-         # combine conditions and filter by profile
-         where_condition = _join_with_logical_operator(
-             profile_condition + partition_conditions, "AND"
-         )
+             partition_conditions = [
+                 _join_with_logical_operator(partition_conditions, "OR")
+             ]

-         # execute raw sql to take all records that match condition, to be put into buffer for transfer
-         select_buffers.append(
-             """SELECT
-             id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile,
-             partition, source_id, conflicting_serialized_data,
-             CAST ('{transfer_session_id}' AS {transfer_session_id_type}), _self_ref_fk
-             FROM {store} WHERE {condition}
-             """.format(
-                 transfer_session_id=transfersession.id,
-                 transfer_session_id_type=TransferSession._meta.pk.rel_db_type(
-                     connection
-                 ),
-                 condition=where_condition,
-                 store=Store._meta.db_table,
+             # combine conditions and filter by profile
+             where_condition = _join_with_logical_operator(
+                 profile_condition + partition_conditions, "AND"
              )
-         )

-     # take all record max counters that are foreign keyed onto store models, which were queued into the buffer
-     select_rmc_buffer_query = """SELECT instance_id, counter, CAST ('{transfer_session_id}' AS {transfer_session_id_type}), store_model_id
-     FROM {record_max_counter} AS rmc
-     INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid
-     WHERE buffer.transfer_session_id = '{transfer_session_id}'
-     """.format(
-         transfer_session_id=transfersession.id,
-         transfer_session_id_type=TransferSession._meta.pk.rel_db_type(connection),
-         record_max_counter=RecordMaxCounter._meta.db_table,
-         outgoing_buffer=Buffer._meta.db_table,
-     )
+             # execute raw sql to take all records that match condition, to be put into buffer for transfer
+             select_buffers.append(
+                 """SELECT
+                 id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile,
+                 partition, source_id, conflicting_serialized_data,
+                 CAST ('{transfer_session_id}' AS {transfer_session_id_type}), _self_ref_fk
+                 FROM {store} WHERE {condition}
+                 """.format(
+                     transfer_session_id=transfersession.id,
+                     transfer_session_id_type=TransferSession._meta.pk.rel_db_type(
+                         connection
+                     ),
+                     condition=where_condition,
+                     store=Store._meta.db_table,
+                 )
+             )

-     with connection.cursor() as cursor:
-         cursor.execute(
-             """INSERT INTO {outgoing_buffer}
-             (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter,
-             hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data,
-             transfer_session_id, _self_ref_fk)
-             {select}
-             """.format(
-                 outgoing_buffer=Buffer._meta.db_table,
-                 select=" UNION ".join(select_buffers),
-             )
+         # take all record max counters that are foreign keyed onto store models, which were queued into the buffer
+         select_rmc_buffer_query = """SELECT instance_id, counter, CAST ('{transfer_session_id}' AS {transfer_session_id_type}), store_model_id
+         FROM {record_max_counter} AS rmc
+         INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid
+         WHERE buffer.transfer_session_id = '{transfer_session_id}'
+         """.format(
+             transfer_session_id=transfersession.id,
+             transfer_session_id_type=TransferSession._meta.pk.rel_db_type(connection),
+             record_max_counter=RecordMaxCounter._meta.db_table,
+             outgoing_buffer=Buffer._meta.db_table,
          )
-         cursor.execute(
-             """INSERT INTO {outgoing_rmcb}
-             (instance_id, counter, transfer_session_id, model_uuid)
-             {select}
-             """.format(
-                 outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table,
-                 select=select_rmc_buffer_query,
+
+         with connection.cursor() as cursor:
+             cursor.execute(
+                 """INSERT INTO {outgoing_buffer}
+                 (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter,
+                 hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data,
+                 transfer_session_id, _self_ref_fk)
+                 {select}
+                 """.format(
+                     outgoing_buffer=Buffer._meta.db_table,
+                     select=" UNION ".join(select_buffers),
+                 )
+             )
+             cursor.execute(
+                 """INSERT INTO {outgoing_rmcb}
+                 (instance_id, counter, transfer_session_id, model_uuid)
+                 {select}
+                 """.format(
+                     outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table,
+                     select=select_rmc_buffer_query,
+                 )
              )
-         )


- @transaction.atomic(using=USING_DB)
  def _dequeue_into_store(transfer_session, fsic, v2_format=False):
      """
      Takes data from the buffers and merges into the store and record max counters.
@@ -862,29 +887,31 @@ def _dequeue_into_store(transfer_session, fsic, v2_format=False):
      ALGORITHM: Incrementally insert and delete on a case by case basis to ensure subsequent cases
      are not affected by previous cases.
      """
-     with connection.cursor() as cursor:
-         DBBackend._dequeuing_delete_rmcb_records(cursor, transfer_session.id)
-         DBBackend._dequeuing_delete_buffered_records(cursor, transfer_session.id)
-         current_id = InstanceIDModel.get_current_instance_and_increment_counter()
-         DBBackend._dequeuing_merge_conflict_buffer(
-             cursor, current_id, transfer_session.id
-         )
-         DBBackend._dequeuing_merge_conflict_rmcb(cursor, transfer_session.id)
-         DBBackend._dequeuing_update_rmcs_last_saved_by(
-             cursor, current_id, transfer_session.id
+
+     with _begin_transaction(Filter(transfer_session.filter)):
+         with connection.cursor() as cursor:
+             DBBackend._dequeuing_delete_rmcb_records(cursor, transfer_session.id)
+             DBBackend._dequeuing_delete_buffered_records(cursor, transfer_session.id)
+             current_id = InstanceIDModel.get_current_instance_and_increment_counter()
+             DBBackend._dequeuing_merge_conflict_buffer(
+                 cursor, current_id, transfer_session.id
+             )
+             DBBackend._dequeuing_merge_conflict_rmcb(cursor, transfer_session.id)
+             DBBackend._dequeuing_update_rmcs_last_saved_by(
+                 cursor, current_id, transfer_session.id
+             )
+             DBBackend._dequeuing_delete_mc_rmcb(cursor, transfer_session.id)
+             DBBackend._dequeuing_delete_mc_buffer(cursor, transfer_session.id)
+             DBBackend._dequeuing_insert_remaining_buffer(cursor, transfer_session.id)
+             DBBackend._dequeuing_insert_remaining_rmcb(cursor, transfer_session.id)
+             DBBackend._dequeuing_delete_remaining_rmcb(cursor, transfer_session.id)
+             DBBackend._dequeuing_delete_remaining_buffer(cursor, transfer_session.id)
+
+         DatabaseMaxCounter.update_fsics(
+             json.loads(fsic),
+             transfer_session.get_filter(),
+             v2_format=v2_format,
          )
-         DBBackend._dequeuing_delete_mc_rmcb(cursor, transfer_session.id)
-         DBBackend._dequeuing_delete_mc_buffer(cursor, transfer_session.id)
-         DBBackend._dequeuing_insert_remaining_buffer(cursor, transfer_session.id)
-         DBBackend._dequeuing_insert_remaining_rmcb(cursor, transfer_session.id)
-         DBBackend._dequeuing_delete_remaining_rmcb(cursor, transfer_session.id)
-         DBBackend._dequeuing_delete_remaining_buffer(cursor, transfer_session.id)
-
-     DatabaseMaxCounter.update_fsics(
-         json.loads(fsic),
-         transfer_session.get_filter(),
-         v2_format=v2_format,
-     )


  class BaseOperation(object):
@@ -1019,7 +1046,14 @@ class SerializeOperation(LocalOperation):
          self._assert(context.filter is not None)

          if context.is_producer and SETTINGS.MORANGO_SERIALIZE_BEFORE_QUEUING:
-             _serialize_into_store(context.sync_session.profile, filter=context.filter)
+             try:
+                 _serialize_into_store(context.sync_session.profile, filter=context.filter)
+             except OperationalError as e:
+                 # if we run into a transaction isolation error, we return a pending status to force
+                 # retrying through the controller flow
+                 if DBBackend._is_transaction_isolation_error(e):
+                     return transfer_statuses.PENDING
+                 raise e

          fsic = json.dumps(
              DatabaseMaxCounter.calculate_filter_specific_instance_counters(
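Note: `_is_transaction_isolation_error` is provided by the loaded backend (see the morango/sync/backends/ entries in the file list above). As a hedged sketch of what such a check can look like on PostgreSQL, where SQLSTATE 40001 is `serialization_failure`, the helper below is illustrative only and is not the package's actual code:

    from django.db.utils import OperationalError

    PG_SERIALIZATION_FAILURE = "40001"  # PostgreSQL SQLSTATE for serialization_failure

    def is_transaction_isolation_error(error):
        """Illustrative check: does this error wrap a Postgres serialization failure?"""
        if not isinstance(error, OperationalError):
            return False
        cause = getattr(error, "__cause__", None)  # Django chains the driver exception here
        return getattr(cause, "pgcode", None) == PG_SERIALIZATION_FAILURE  # psycopg2 SQLSTATE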
@@ -1104,6 +1138,38 @@ class PullProducerOperation(LocalOperation):
          return transfer_statuses.PENDING


+ class PullReceiverOperation(LocalOperation):
+     """
+     Operation that ensures we don't run into unhandled stages when the sync is a pull,
+     and the local instance is receiving the pulled data
+     """
+
+     def handle(self, context):
+         """
+         :type context: LocalSessionContext
+         """
+         self._assert(context.is_pull)
+         self._assert(context.is_receiver)
+         self._assert(context.request is None)
+         return transfer_statuses.COMPLETED
+
+
+ class PushProducerOperation(LocalOperation):
+     """
+     Operation that ensures we don't run into unhandled stages when the sync is a push,
+     and the local instance is the one pushing the data
+     """
+
+     def handle(self, context):
+         """
+         :type context: LocalSessionContext
+         """
+         self._assert(context.is_push)
+         self._assert(context.is_producer)
+         self._assert(context.request is None)
+         return transfer_statuses.COMPLETED
+
+
  class PushReceiverOperation(LocalOperation):
      """
      Operation that handles the result of a push, as the server, using a local context / session
@@ -1207,9 +1273,16 @@ class ReceiverDeserializeOperation(LocalOperation):

          records_transferred = context.transfer_session.records_transferred or 0
          if SETTINGS.MORANGO_DESERIALIZE_AFTER_DEQUEUING and records_transferred > 0:
-             # we first serialize to avoid deserialization merge conflicts
-             _serialize_into_store(context.sync_session.profile, filter=context.filter)
-             _deserialize_from_store(context.sync_session.profile, filter=context.filter)
+             try:
+                 # we first serialize to avoid deserialization merge conflicts
+                 _serialize_into_store(context.sync_session.profile, filter=context.filter)
+                 _deserialize_from_store(context.sync_session.profile, filter=context.filter)
+             except OperationalError as e:
+                 # if we run into a transaction isolation error, we return a pending status to force
+                 # retrying through the controller flow
+                 if DBBackend._is_transaction_isolation_error(e):
+                     return transfer_statuses.PENDING
+                 raise e

          return transfer_statuses.COMPLETED