sequent 7.0.0 → 7.1.0
- checksums.yaml +4 -4
- data/db/sequent_schema.rb +5 -0
- data/lib/sequent/configuration.rb +8 -13
- data/lib/sequent/core/event_record.rb +1 -0
- data/lib/sequent/core/event_store.rb +4 -0
- data/lib/sequent/core/helpers/message_handler.rb +10 -11
- data/lib/sequent/core/helpers/message_router.rb +13 -7
- data/lib/sequent/core/persistors/active_record_persistor.rb +4 -0
- data/lib/sequent/core/persistors/persistor.rb +5 -0
- data/lib/sequent/core/persistors/replay_optimized_postgres_persistor.rb +140 -133
- data/lib/sequent/dry_run/dry_run.rb +4 -0
- data/lib/sequent/dry_run/read_only_replay_optimized_postgres_persistor.rb +26 -0
- data/lib/sequent/dry_run/view_schema.rb +36 -0
- data/lib/sequent/generator/template_project/db/sequent_schema.rb +1 -0
- data/lib/sequent/migrations/errors.rb +12 -0
- data/lib/sequent/migrations/planner.rb +7 -3
- data/lib/sequent/migrations/versions.rb +82 -0
- data/lib/sequent/migrations/view_schema.rb +101 -58
- data/lib/sequent/rake/migration_tasks.rb +89 -6
- data/lib/sequent/support/database.rb +1 -11
- data/lib/sequent/support.rb +0 -2
- data/lib/sequent/util/util.rb +1 -0
- data/lib/sequent/util/web/clear_cache.rb +19 -0
- data/lib/sequent.rb +1 -0
- data/lib/version.rb +1 -1
- metadata +20 -29
- data/lib/sequent/core/helpers/message_dispatcher.rb +0 -23
- data/lib/sequent/support/view_projection.rb +0 -61
- data/lib/sequent/support/view_schema.rb +0 -24
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 31a7eb29122b5706014155cacea33cc9064991eca03762d6c9eb0a300e4d3fe5
+  data.tar.gz: 4724c03d49b69fd01d111a53961936c3b1a6adba91bacb9af0abd97c4337d49a
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c13866f01dde089ae6cd95ca17fbbfd594209751300f429b0ef4f3680d19f1a468bf2675b24536b6e09150f80471afdab5a663be79fd45692fe758c47dd8f26
+  data.tar.gz: 96d19ae22452eb86ed09fda426e6cb56aad6b47be5589004a8ba0e24623bd8316297d8b29829210fafb57e75b251797007bb9b610d27850809ce4b1c661adcde
data/db/sequent_schema.rb
CHANGED
@@ -8,8 +8,12 @@ ActiveRecord::Schema.define do
 t.text "event_json", :null => false
 t.integer "command_record_id", :null => false
 t.integer "stream_record_id", :null => false
+t.bigint "xact_id"
 end

+execute %Q{
+ALTER TABLE event_records ALTER COLUMN xact_id SET DEFAULT pg_current_xact_id()::text::bigint
+}
 execute %Q{
 CREATE UNIQUE INDEX unique_event_per_aggregate ON event_records (
 aggregate_id,
@@ -24,6 +28,7 @@ CREATE INDEX snapshot_events ON event_records (aggregate_id, sequence_number DES
 add_index "event_records", ["command_record_id"], :name => "index_event_records_on_command_record_id"
 add_index "event_records", ["event_type"], :name => "index_event_records_on_event_type"
 add_index "event_records", ["created_at"], :name => "index_event_records_on_created_at"
+add_index "event_records", ["xact_id"], :name => "index_event_records_on_xact_id"

 create_table "command_records", :force => true do |t|
 t.string "user_id"
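The generated schema now adds an `xact_id` column with a `pg_current_xact_id()` default plus an index on it. Projects that generated their event store with an earlier version need an equivalent change; a minimal sketch (the migration name and Rails version are illustrative, not part of the gem):

```ruby
# Illustrative upgrade migration mirroring the schema change above.
class AddXactIdToEventRecords < ActiveRecord::Migration[7.0]
  def up
    add_column :event_records, :xact_id, :bigint
    execute <<~SQL
      ALTER TABLE event_records ALTER COLUMN xact_id SET DEFAULT pg_current_xact_id()::text::bigint
    SQL
    add_index :event_records, :xact_id
  end

  def down
    remove_index :event_records, :xact_id
    remove_column :event_records, :xact_id
  end
end
```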
data/lib/sequent/configuration.rb
CHANGED
@@ -10,7 +10,6 @@ require 'logger'
 module Sequent
 class Configuration
 DEFAULT_VERSIONS_TABLE_NAME = 'sequent_versions'
-DEFAULT_REPLAYED_IDS_TABLE_NAME = 'sequent_replayed_ids'

 DEFAULT_MIGRATION_SQL_FILES_DIRECTORY = 'db/tables'
 DEFAULT_DATABASE_CONFIG_DIRECTORY = 'db'
@@ -68,8 +67,7 @@ module Sequent
 :enable_autoregistration

 attr_reader :migrations_class_name,
-:versions_table_name
-:replayed_ids_table_name
+:versions_table_name

 def self.instance
 @instance ||= new
@@ -104,7 +102,6 @@ module Sequent
 self.event_publisher = Sequent::Core::EventPublisher.new
 self.disable_event_handlers = false
 self.versions_table_name = DEFAULT_VERSIONS_TABLE_NAME
-self.replayed_ids_table_name = DEFAULT_REPLAYED_IDS_TABLE_NAME
 self.migration_sql_files_directory = DEFAULT_MIGRATION_SQL_FILES_DIRECTORY
 self.view_schema_name = DEFAULT_VIEW_SCHEMA_NAME
 self.event_store_schema_name = DEFAULT_EVENT_STORE_SCHEMA_NAME
@@ -135,18 +132,11 @@ module Sequent
 enable_multiple_database_support && ActiveRecord.version > Gem::Version.new('6.1.0')
 end

-def replayed_ids_table_name=(table_name)
-fail ArgumentError, 'table_name can not be nil' unless table_name
-
-@replayed_ids_table_name = table_name
-Sequent::Migrations::ViewSchema::ReplayedIds.table_name = table_name
-end
-
 def versions_table_name=(table_name)
 fail ArgumentError, 'table_name can not be nil' unless table_name

 @versions_table_name = table_name
-Sequent::Migrations::
+Sequent::Migrations::Versions.table_name = table_name
 end

 def migrations_class_name=(class_name)
@@ -158,13 +148,14 @@ module Sequent
 @migrations_class_name = class_name
 end

+# @!visibility private
 def autoregister!
 return unless enable_autoregistration

 # Only autoregister the AggregateSnapshotter if the autoregistration is enabled
 Sequent::Core::AggregateSnapshotter.skip_autoregister = false

-
+autoload_if_in_rails

 self.class.instance.command_handlers ||= []
 for_each_autoregisterable_descenant_of(Sequent::CommandHandler) do |command_handler_class|
@@ -198,6 +189,10 @@ module Sequent

 private

+def autoload_if_in_rails
+Rails.autoloaders.main.eager_load(force: true) if defined?(Rails) && Rails.respond_to?(:autoloaders)
+end
+
 def for_each_autoregisterable_descenant_of(clazz, &block)
 clazz
 .descendants
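With the replayed-ids table gone, `replayed_ids_table_name` disappears from the configuration; only `versions_table_name` remains, and setting it now updates `Sequent::Migrations::Versions.table_name`. A sketch of a 7.1.0 initializer touching the remaining settings ('Migrations' is a typical class name, 'sequent_versions' is the default):

```ruby
# Sketch: 7.1.0 configuration. replayed_ids_table_name no longer exists.
Sequent.configure do |config|
  config.migrations_class_name = 'Migrations'
  config.versions_table_name   = 'sequent_versions'
end
```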
data/lib/sequent/core/event_store.rb
CHANGED
@@ -209,6 +209,10 @@ module Sequent

 private

+def quote_table_name(table_name)
+Sequent.configuration.event_record_class.connection.quote_table_name(table_name)
+end
+
 def event_types
 @event_types = if Sequent.configuration.event_store_cache_event_types
 ThreadSafe::Cache.new
data/lib/sequent/core/helpers/message_handler.rb
CHANGED
@@ -2,7 +2,6 @@

 require_relative 'message_handler_option_registry'
 require_relative 'message_router'
-require_relative 'message_dispatcher'

 module Sequent
 module Core
@@ -60,11 +59,7 @@ module Sequent
 end

 def message_mapping
-message_router
-.routes
-.select { |matcher, _handlers| matcher.is_a?(MessageMatchers::InstanceOf) }
-.map { |k, v| [k.expected_class, v] }
-.to_h
+message_router.instanceof_routes
 end

 def handles_message?(message)
@@ -106,13 +101,17 @@ module Sequent
 end

 def handle_message(message)
-
+handlers = self.class.message_router.match_message(message)
+dispatch_message(message, handlers) unless handlers.empty?
 end

-
-
-
-
+def dispatch_message(message, handlers)
+handlers.each do |handler|
+if Sequent.logger.debug?
+Sequent.logger.debug("[MessageHandler] Handler #{self.class} handling #{message.class}")
+end
+instance_exec(message, &handler)
+end
 end
 end
 end
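The separate `MessageDispatcher` is gone: `handle_message` now asks the router for the matching handler blocks and runs each one with `instance_exec`, logging at debug level. Handler classes are unaffected; a routine `on` block keeps working, for example (projector, record, and event names are illustrative):

```ruby
# Illustrative handler: blocks registered with `on` are collected by
# MessageRouter#match_message and invoked via instance_exec on dispatch.
class InvoiceProjector < Sequent::Projector
  manages_tables InvoiceRecord

  on InvoiceCreated do |event|
    create_record(InvoiceRecord, aggregate_id: event.aggregate_id)
  end
end
```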
data/lib/sequent/core/helpers/message_router.rb
CHANGED
@@ -7,7 +7,7 @@ module Sequent
 module Core
 module Helpers
 class MessageRouter
-attr_reader :routes
+attr_reader :routes, :instanceof_routes

 def initialize
 clear_routes
@@ -21,7 +21,11 @@ module Sequent
 #
 def register_matchers(*matchers, handler)
 matchers.each do |matcher|
-
+if matcher.is_a?(MessageMatchers::InstanceOf)
+@instanceof_routes[matcher.expected_class] << handler
+else
+@routes[matcher] << handler
+end
 end
 end

@@ -29,11 +33,12 @@ module Sequent
 # Returns a set of handlers that match the given message, or an empty set when none match.
 #
 def match_message(message)
-
-
-
-
-
+result = Set.new
+result.merge(@instanceof_routes[message.class])
+@routes.each do |matcher, handlers|
+result.merge(handlers) if matcher.matches_message?(message)
+end
+result
 end

 ##
@@ -47,6 +52,7 @@ module Sequent
 # Removes all routes from the router.
 #
 def clear_routes
+@instanceof_routes = Hash.new { |h, k| h[k] = Set.new }
 @routes = Hash.new { |h, k| h[k] = Set.new }
 end
 end
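The router now keeps a second hash, `instanceof_routes`, keyed directly by the expected class, so the common `on SomeEvent` registration resolves with a hash lookup instead of scanning every matcher; only the remaining matchers are checked one by one. A standalone sketch of the same two-tier idea (plain Ruby, not the gem's API):

```ruby
require 'set'

# Sketch: exact-class routes resolve via a hash lookup; other matchers
# (anything responding to #matches_message?) are scanned linearly.
class TwoTierRouter
  def initialize
    @instanceof_routes = Hash.new { |h, k| h[k] = Set.new }
    @matcher_routes = Hash.new { |h, k| h[k] = Set.new }
  end

  def register(matcher, handler)
    if matcher.is_a?(Class) # stands in for MessageMatchers::InstanceOf
      @instanceof_routes[matcher] << handler
    else
      @matcher_routes[matcher] << handler
    end
  end

  def match(message)
    result = Set.new
    result.merge(@instanceof_routes[message.class]) # O(1) for the common case
    @matcher_routes.each do |matcher, handlers|
      result.merge(handlers) if matcher.matches_message?(message)
    end
    result
  end
end
```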
data/lib/sequent/core/persistors/persistor.rb
CHANGED
@@ -76,6 +76,11 @@ module Sequent
 fail 'Method not supported in this persistor'
 end

+# Hook to implement for instance the persistor batches statements
+def prepare
+fail 'Method not supported in this persistor'
+end
+
 # Hook to implement for instance the persistor batches statements
 def commit
 fail 'Method not supported in this persistor'
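`prepare` joins `commit` as an optional lifecycle hook on the Persistor contract; the base module raises for both, so custom persistors opt in explicitly. A sketch of a persistor implementing both hooks (the class and its batching are illustrative; the rest of the Persistor contract is assumed to be implemented elsewhere):

```ruby
# Sketch: a custom persistor honouring the new prepare hook next to commit.
class BatchingPersistor
  include Sequent::Core::Persistors::Persistor

  def prepare
    @statements = [] # called before a batch of events is replayed
  end

  def commit
    flush(@statements) # called after the batch has been handled
    @statements = []
  end

  private

  def flush(statements)
    # write the collected statements to the view schema (omitted here)
  end
end
```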
data/lib/sequent/core/persistors/replay_optimized_postgres_persistor.rb
CHANGED
@@ -41,13 +41,17 @@ module Sequent
 # end
 #
 # In this case it is wise to create an index on InvoiceRecord on the aggregate_id and recipient_id
-# like you would in the database.
+# attributes like you would in the database. Note that previous versions of this class supported
+# multi-column indexes. These are now split into multiple single-column indexes and the results of
+# each index is combined using set-intersection. This reduces the amount of memory used and makes
+# it possible to use an index in more cases (whenever an indexed attribute is present in the where
+# clause the index will be used, so not all attributes need to be present).
 #
 # Example:
 #
 # ReplayOptimizedPostgresPersistor.new(
 # 50,
-# {InvoiceRecord => [
+# {InvoiceRecord => [:aggregate_id, :recipient_id]}
 # )
 class ReplayOptimizedPostgresPersistor
 include Persistor
@@ -56,108 +60,106 @@ module Sequent
 attr_reader :record_store
 attr_accessor :insert_with_csv_size

-
-
+# We create a struct on the fly to represent an in-memory record.
+#
+# Since the replay happens in memory we implement the ==, eql? and hash methods
+# to point to the same object. A record is the same if and only if they point to
+# the same object. These methods are necessary since we use Set instead of [].
+#
+# Also basing equality on object identity is more consistent with ActiveRecord,
+# which is the implementation used during normal (non-optimized) replay.
+module InMemoryStruct
+def ==(other)
+equal?(other)
+end
+def eql?(other)
+equal?(other)
+end
+def hash
+object_id.hash
+end
 end

-
-
-
-
+def struct_cache
+@struct_cache ||= Hash.new do |hash, record_class|
+struct_class = Struct.new(*record_class.column_names.map(&:to_sym), keyword_init: true) do
+include InMemoryStruct
 end
-
+hash[record_class] = struct_class
 end
 end

 class Index
+attr_reader :indexed_columns
+
 def initialize(indexed_columns)
-@indexed_columns =
-
-
-
-
-
+@indexed_columns = indexed_columns.to_set
+@indexes = @indexed_columns.to_h do |field|
+[field, {}]
+end
+@reverse_indexes = @indexed_columns.to_h do |field|
+[field, {}.compare_by_identity]
 end
-
-@indexed_columns = @indexed_columns.merge(
-indexed_columns.reduce({}) do |memo, (key, ics)|
-memo.merge({key => ics.map { |c| c.map(&:to_s) }})
-end,
-)
-
-@index = {}
-@reverse_index = {}
 end

-def add(
-
-
-
-
-@
-
-@reverse_index[record.object_id.hash] = [] unless @reverse_index.key? record.object_id.hash
-@reverse_index[record.object_id.hash] << key.hash
+def add(record)
+@indexes.map do |field, index|
+key = Persistors.normalize_symbols(record[field]).freeze
+records = index[key] || (index[key] = Set.new.compare_by_identity)
+records << record
+@reverse_indexes[field][record] = key
 end
 end

-def remove(
-
-
-
-
-return unless keys.any?
-
-keys.each do |key|
-@index[key].delete(record)
-@index.delete(key) if @index[key].count == 0
+def remove(record)
+@indexes.map do |field, index|
+key = @reverse_indexes[field].delete(record)
+remaining = index[key]&.delete(record)
+index.delete(key) if remaining&.empty?
 end
 end

-def update(
-remove(
-add(
+def update(record)
+remove(record)
+add(record)
 end

-def find(
-
-
-
-
+def find(normalized_where_clause)
+record_sets = normalized_where_clause.map do |(field, expected_value)|
+if expected_value.is_a?(Array)
+expected_value.reduce(Set.new.compare_by_identity) do |memo, value|
+key = Persistors.normalize_symbols(value)
+memo.merge(@indexes[field][key] || [])
+end
+else
+key = Persistors.normalize_symbols(expected_value)
+@indexes[field][key] || Set.new.compare_by_identity
+end
+end
+
+smallest, *rest = record_sets.sort_by(&:size)
+return smallest.to_a if smallest.empty? || rest.empty?
+
+smallest.select do |record|
+rest.all? { |x| x.include? record }
 end
-@index[key.hash] || []
 end

 def clear
-@
-
+@indexed_columns.each do |field|
+@indexes[field].clear
+@reverse_indexes[field].clear
+end
 end

-def use_index?(
-
+def use_index?(normalized_where_clause)
+get_indexes(normalized_where_clause).present?
 end

 private

-def
-@indexed_columns.
-end
-
-def get_keys(record_class, record)
-@indexed_columns[record_class].map do |index|
-arr = [record_class.name]
-index.each do |key|
-arr << key
-arr << record[key]
-end
-arr
-end
-end
-
-def get_index(record_class, where_clause)
-@indexed_columns[record_class].find do |indexed_where|
-where_clause.keys.size == indexed_where.size && (where_clause.keys.map(&:to_s) - indexed_where).empty?
-end
+def get_indexes(normalized_where_clause)
+@indexed_columns & normalized_where_clause.keys
 end
 end

@@ -167,17 +169,28 @@ module Sequent
 # Key corresponds to the name of the 'Record'
 # Values contains list of lists on which columns to index.
 # E.g. [[:first_index_column], [:another_index, :with_to_columns]]
-def initialize(insert_with_csv_size = 50, indices = {})
+def initialize(insert_with_csv_size = 50, indices = {}, default_indexed_columns = [:aggregate_id])
 @insert_with_csv_size = insert_with_csv_size
-@record_store = Hash.new { |h, k| h[k] = Set.new }
-@record_index =
+@record_store = Hash.new { |h, k| h[k] = Set.new.compare_by_identity }
+@record_index = Hash.new do |h, k|
+h[k] = Index.new(default_indexed_columns.to_set & k.column_names.map(&:to_sym))
+end
+
+indices.each do |record_class, indexed_columns|
+columns = indexed_columns.flatten(1).map(&:to_sym).to_set + default_indexed_columns
+@record_index[record_class] = Index.new(columns & record_class.column_names.map(&:to_sym))
+end
+
+@record_defaults = Hash.new do |h, record_class|
+h[record_class] = record_class.column_defaults.symbolize_keys
+end
 end

 def update_record(record_class, event, where_clause = {aggregate_id: event.aggregate_id}, options = {})
 record = get_record!(record_class, where_clause)
-record.updated_at = event.created_at if record.respond_to?(:updated_at)
+record.updated_at = event.created_at if record.respond_to?(:updated_at=)
 yield record if block_given?
-@record_index.update(
+@record_index[record_class].update(record)
 update_sequence_number = if options.key?(:update_sequence_number)
 options[:update_sequence_number]
 else
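Usage of the rewritten constructor: indexes are now declared per record class as flat column lists, `aggregate_id` is always indexed via `default_indexed_columns`, and multi-column definitions from older code are flattened into single-column indexes. A hedged example (the record class is illustrative):

```ruby
# Sketch: constructing the persistor in 7.1.0. :aggregate_id is indexed by
# default; :recipient_id gets its own single-column index.
persistor = Sequent::Core::Persistors::ReplayOptimizedPostgresPersistor.new(
  50,                                 # insert_with_csv_size
  {InvoiceRecord => [:recipient_id]}, # legacy nested arrays are flattened
)
```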
@@ -187,41 +200,16 @@ module Sequent
 end

 def create_record(record_class, values)
-
-
-
-struct_class_name = "#{record_class}Struct"
-if self.class.struct_cache.key?(struct_class_name)
-struct_class = self.class.struct_cache[struct_class_name]
-else
-# We create a struct on the fly.
-# Since the replay happens in memory we implement the ==, eql? and hash methods
-# to point to the same object. A record is the same if and only if they point to
-# the same object. These methods are necessary since we use Set instead of [].
-class_def = <<-EOD
-#{struct_class_name} = Struct.new(*#{column_names.map(&:to_sym)})
-class #{struct_class_name}
-include InitStruct
-def ==(other)
-self.equal?(other)
-end
-def hash
-self.object_id.hash
-end
-end
-EOD
-# rubocop:disable Security/Eval
-eval(class_def.to_s)
-# rubocop:enable Security/Eval
-struct_class = ReplayOptimizedPostgresPersistor.const_get(struct_class_name)
-self.class.struct_cache[struct_class_name] = struct_class
+record = struct_cache[record_class].new(**values)
+@record_defaults[record_class].each do |column, default|
+record[column] = default unless values.include? column
 end
-record =
+record.updated_at = values[:created_at] if record.respond_to?(:updated_at)

 yield record if block_given?
-@record_store[record_class] << record

-@
+@record_store[record_class] << record
+@record_index[record_class].add(record)

 record
 end
@@ -234,7 +222,7 @@ module Sequent
 record = get_record(record_class, values)
 record ||= create_record(record_class, values.merge(created_at: created_at))
 yield record if block_given?
-@record_index.update(
+@record_index[record_class].update(record)
 record
 end

@@ -260,15 +248,15 @@ module Sequent

 def delete_record(record_class, record)
 @record_store[record_class].delete(record)
-@record_index.remove(
+@record_index[record_class].remove(record)
 end

 def update_all_records(record_class, where_clause, updates)
 find_records(record_class, where_clause).each do |record|
 updates.each_pair do |k, v|
-record[k
+record[k] = v
 end
-@record_index.update(
+@record_index[record_class].update(record)
 end
 end

@@ -276,33 +264,41 @@ module Sequent
 records = find_records(record_class, where_clause)
 records.each do |record|
 yield record
-@record_index.update(
+@record_index[record_class].update(record)
 end
 end

 def do_with_record(record_class, where_clause)
 record = get_record!(record_class, where_clause)
 yield record
-@record_index.update(
+@record_index[record_class].update(record)
 end

 def find_records(record_class, where_clause)
-
-
-
-
-
-
-
-
-
-
-
-
-
+where_clause = where_clause.symbolize_keys
+
+indexed_columns = @record_index[record_class].indexed_columns
+indexed_fields, non_indexed_fields = where_clause.partition { |field, _| indexed_columns.include? field }
+
+candidate_records = if indexed_fields.present?
+@record_index[record_class].find(indexed_fields)
+else
+@record_store[record_class]
+end
+
+return candidate_records.to_a if non_indexed_fields.empty?
+
+candidate_records.select do |record|
+non_indexed_fields.all? do |k, v|
+expected_value = Persistors.normalize_symbols(v)
+actual_value = Persistors.normalize_symbols(record[k])
+if expected_value.is_a?(Array)
+expected_value.include?(actual_value)
+else
+actual_value == expected_value
 end
 end
-end
+end
 end

 def last_record(record_class, where_clause)
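`find_records` now splits the where clause: fields covered by an index are answered through `Index#find` (set intersection over the per-column indexes), and the remaining fields filter those candidates in memory with symbol/string indifferent comparison. For example, with the persistor built in the earlier sketch (values illustrative):

```ruby
# Sketch: recipient_id is indexed, so it narrows the candidate set first;
# status is not indexed and is filtered record by record afterwards.
records = persistor.find_records(
  InvoiceRecord,
  recipient_id: 'recipient-1',
  status: :open, # also matches records stored with 'open' (normalize_symbols)
)
```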
@@ -310,6 +306,10 @@ module Sequent
 results.empty? ? nil : results.last
 end

+def prepare
+# noop
+end
+
 def commit
 @record_store.each do |clazz, records|
 @column_cache ||= {}
@@ -358,7 +358,7 @@ module Sequent

 def clear
 @record_store.clear
-@record_index.clear
+@record_index.values.each(&:clear)
 end

 private
@@ -366,12 +366,19 @@ module Sequent
 def cast_value_to_column_type(clazz, column_name, record)
 uncasted_value = ActiveModel::Attribute.from_database(
 column_name,
-record[column_name
+record[column_name],
 Sequent::ApplicationRecord.connection.lookup_cast_type_from_column(@column_cache[clazz.name][column_name]),
 ).value_for_database
 Sequent::ApplicationRecord.connection.type_cast(uncasted_value)
 end
 end
+
+# Normalizes symbol values to strings (by using its name) while
+# preserving all other values. This allows symbol/string
+# indifferent comparisons.
+def self.normalize_symbols(value)
+value.is_a?(Symbol) ? value.name : value
+end
 end
 end
 end
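The new `Persistors.normalize_symbols` helper is what makes the symbol/string indifference above work: it converts symbols to their string name (relies on `Symbol#name`, Ruby 3.0+) and passes every other value through unchanged. For example:

```ruby
Sequent::Core::Persistors.normalize_symbols(:open)  # => "open"
Sequent::Core::Persistors.normalize_symbols('open') # => "open"
Sequent::Core::Persistors.normalize_symbols(42)     # => 42
```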