cassandra_model 0.9.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +13 -0
  3. data/README.md +170 -0
  4. data/lib/cassandra_model.rb +48 -0
  5. data/lib/cassandra_model/batch_reactor.rb +32 -0
  6. data/lib/cassandra_model/batch_reactor/future.rb +49 -0
  7. data/lib/cassandra_model/composite_record.rb +49 -0
  8. data/lib/cassandra_model/composite_record_static.rb +169 -0
  9. data/lib/cassandra_model/connection_cache.rb +24 -0
  10. data/lib/cassandra_model/counter_record.rb +58 -0
  11. data/lib/cassandra_model/data_inquirer.rb +105 -0
  12. data/lib/cassandra_model/data_modelling.rb +45 -0
  13. data/lib/cassandra_model/data_set.rb +84 -0
  14. data/lib/cassandra_model/displayable_attributes.rb +44 -0
  15. data/lib/cassandra_model/global_callbacks.rb +39 -0
  16. data/lib/cassandra_model/logging.rb +8 -0
  17. data/lib/cassandra_model/meta_columns.rb +162 -0
  18. data/lib/cassandra_model/meta_table.rb +66 -0
  19. data/lib/cassandra_model/query_builder.rb +122 -0
  20. data/lib/cassandra_model/query_helper.rb +44 -0
  21. data/lib/cassandra_model/query_result.rb +23 -0
  22. data/lib/cassandra_model/raw_connection.rb +163 -0
  23. data/lib/cassandra_model/record.rb +551 -0
  24. data/lib/cassandra_model/result_paginator.rb +37 -0
  25. data/lib/cassandra_model/rotating_table.rb +49 -0
  26. data/lib/cassandra_model/single_token_batch.rb +23 -0
  27. data/lib/cassandra_model/single_token_counter_batch.rb +5 -0
  28. data/lib/cassandra_model/single_token_logged_batch.rb +5 -0
  29. data/lib/cassandra_model/single_token_unlogged_batch.rb +5 -0
  30. data/lib/cassandra_model/table_definition.rb +72 -0
  31. data/lib/cassandra_model/table_descriptor.rb +49 -0
  32. data/lib/cassandra_model/table_redux.rb +58 -0
  33. data/lib/cassandra_model/type_guessing.rb +40 -0
  34. metadata +133 -0
data/lib/cassandra_model/meta_table.rb
@@ -0,0 +1,66 @@
+ module CassandraModel
+   class MetaTable < TableRedux
+     def initialize(connection_name = nil, table_definition)
+       @table_definition = table_definition
+       @connection_name = connection_name
+     end
+
+     def reset_local_schema!
+       raise Cassandra::Errors::ClientError, 'Schema changes are not supported for meta tables'
+     end
+
+     def name
+       @name ||= begin
+         create_table
+         name_in_cassandra
+       end
+     end
+
+     def ==(rhs)
+       connection == rhs.connection &&
+         table_definition == rhs.table_definition
+     end
+
+     protected
+
+     attr_reader :table_definition
+
+     private
+
+     def table
+       @table ||= create_table
+     end
+
+     def keyspace
+       connection.keyspace
+     end
+
+     def create_table
+       descriptor = TableDescriptor.create(@table_definition)
+       create_cassandra_table(descriptor) if descriptor.valid
+       100.times do
+         sleep 0.100
+         break if keyspace.table(name_in_cassandra)
+       end
+       keyspace.table(name_in_cassandra) or raise "Could not verify the creation of table #{name_in_cassandra}"
+     end
+
+     def create_cassandra_table(descriptor)
+       begin
+         connection.session.execute(create_table_cql)
+       rescue
+         descriptor.delete
+         raise
+       end
+     end
+
+     def create_table_cql
+       @table_definition.to_cql(check_exists: true)
+     end
+
+     def name_in_cassandra
+       @table_definition.name_in_cassandra
+     end
+
+   end
+ end
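Note: MetaTable defers all schema work to first use. A minimal usage sketch follows; it is illustrative only, and the TableDefinition keyword arguments shown are assumptions, since table_definition.rb is not reproduced in this excerpt.

    # Illustrative only -- the TableDefinition options below are assumed, not taken
    # from this diff. What the diff itself shows:
    #   * MetaTable.new only stores the connection name and the definition
    #   * the first call to #name runs create_table: it records a TableDescriptor,
    #     issues the CREATE TABLE CQL, then polls (up to 100 x 0.1s) until the table
    #     is visible in the keyspace, raising if it never appears
    #   * reset_local_schema! always raises, because meta tables own their schema
    definition = CassandraModel::TableDefinition.new(
        name: :observations,                            # assumed option
        partition_key: {observed_id: :text},            # assumed option
        clustering_columns: {observed_at: :timestamp},  # assumed option
        remaining_columns: {payload: :text}             # assumed option
    )
    table = CassandraModel::MetaTable.new(:default, definition)
    table.name # creates the backing table on first access and returns name_in_cassandra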
data/lib/cassandra_model/query_builder.rb
@@ -0,0 +1,122 @@
+ module CassandraModel
+   class QueryBuilder
+     include Enumerable
+     extend Forwardable
+
+     def_delegator :async, :each
+
+     def initialize(record_klass)
+       @record_klass = record_klass
+       @params = {}
+       @options = {}
+     end
+
+     def async
+       @record_klass.request_async(@params, @options)
+     end
+
+     def get
+       @record_klass.request(@params, @options)
+     end
+
+     def to_cql
+       @record_klass.request_meta(@params, @options).first
+     end
+
+     def inspect
+       results = limit(@options[:limit] || 10).get
+       "#<#{self.class.to_s}: #{inspected_results(results)}>"
+     end
+
+     def first_async
+       @record_klass.first_async(@params, @options)
+     end
+
+     def first
+       @record_klass.first(@params, @options)
+     end
+
+     def create_async(attributes = {}, create_options = {})
+       @record_klass.create_async(@params.merge(attributes), @options.merge(create_options))
+     end
+
+     def create(attributes = {}, create_options = {})
+       @record_klass.create(@params.merge(attributes), @options.merge(create_options))
+     end
+
+     def new(attributes)
+       @record_klass.new(@params.merge(attributes))
+     end
+
+     def first_or_new_async(attributes)
+       first_async.then do |result|
+         result || new(attributes)
+       end
+     end
+
+     def first_or_new(attributes)
+       first_or_new_async(attributes).get
+     end
+
+     def check_exists
+       @options.merge!(check_exists: true)
+       self
+     end
+
+     def pluck(*columns)
+       query = select(*columns)
+       if columns.length == 1
+         query.map { |result| pluck_values(columns, result).first }
+       else
+         query.map { |result| pluck_values(columns, result) }
+       end
+     end
+
+     def each_slice(slice_size = nil, &block)
+       paginate(slice_size).async.each_slice(&block)
+     end
+
+     def where(params)
+       @params.merge!(params)
+       self
+     end
+
+     def select(*columns)
+       @options[:select] ||= []
+       @options[:select].concat(columns)
+       self
+     end
+
+     def order(*columns)
+       @options[:order_by] ||= []
+       @options[:order_by].concat(columns)
+       self
+     end
+
+     def limit(limit)
+       @options[:limit] = limit
+       self
+     end
+
+     def trace(trace)
+       @options[:trace] = trace
+       self
+     end
+
+     def paginate(page_size)
+       @options[:page_size] = page_size
+       self
+     end
+
+     private
+
+     def pluck_values(columns, result)
+       result.attributes.slice(*columns).values
+     end
+
+     def inspected_results(results)
+       "[#{(results.map(&:to_s) + %w(...)) * ', '}]"
+     end
+
+   end
+ end
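Every chainable method on QueryBuilder mutates @params or @options and returns self, and the terminal calls simply forward to the record class. A minimal sketch, assuming a hypothetical Product model built on Record:

    # Product is hypothetical; the builder only needs the request/first/create
    # class methods that Record provides.
    builder = CassandraModel::QueryBuilder.new(Product)

    query = builder.
        where(brand: 'acme').     # merged into @params
        select(:name, :price).    # accumulated under @options[:select]
        order(:created_at).       # accumulated under @options[:order_by]
        limit(10)                 # stored as @options[:limit]

    query.get                        # synchronous: Product.request(@params, @options)
    query.first_or_new(name: 'n/a')  # falls back to an unsaved record when nothing matches
    builder.pluck(:name)             # single column requested => flat array of values
    builder.each_slice(500) { |slice| } # sets page_size, then iterates slice by slice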
data/lib/cassandra_model/query_helper.rb
@@ -0,0 +1,44 @@
+ module CassandraModel
+   module QueryHelper
+
+     def self.def_query_helper(name)
+       define_method(name) do |*args|
+         QueryBuilder.new(self).send(name, *args)
+       end
+     end
+
+     def_query_helper(:where)
+     def_query_helper(:select)
+     def_query_helper(:pluck)
+     def_query_helper(:paginate)
+     def_query_helper(:each_slice)
+     def_query_helper(:limit)
+     def_query_helper(:order)
+
+     def find_by(attributes)
+       where(attributes).first
+     end
+
+     def all
+       where({})
+     end
+
+     def after(record)
+       next_cluster(:gt, record)
+     end
+
+     def before(record)
+       next_cluster(:lt, record)
+     end
+
+     private
+
+     def next_cluster(operator, record)
+       partition_key = record.partition_key
+       clustering_columns = record.clustering_columns
+       cluster_comparer = {clustering_columns.keys.public_send(operator) => clustering_columns.values}
+       where(partition_key.merge(cluster_comparer))
+     end
+
+   end
+ end
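QueryHelper is what Record extends to expose these as class-level entry points; each generated helper just instantiates a QueryBuilder against the model and forwards the first call. A sketch, with Product again hypothetical:

    Product.where(brand: 'acme').limit(5).get  # QueryBuilder.new(Product).where(...).limit(5).get
    Product.find_by(brand: 'acme')             # where(...).first
    Product.all.each_slice(500) { |slice| }    # where({}) then paginated iteration

    # after/before restrict on the clustering columns relative to an existing row,
    # sending :gt / :lt to the clustering-column key list (a ThomasUtils key comparer).
    last_seen = Product.first
    Product.after(last_seen).limit(10).get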
data/lib/cassandra_model/query_result.rb
@@ -0,0 +1,23 @@
+ module CassandraModel
+   class QueryResult
+     @@class_cache = {}
+
+     attr_reader :attributes
+
+     def initialize(attributes)
+       @attributes = attributes
+     end
+
+     def self.create(attributes)
+       columns = attributes.keys
+       klass = (@@class_cache[columns] ||= Class.new(QueryResult))
+       result = klass.new(attributes)
+       columns.each { |column| klass.send(:define_method, column.to_sym) { self.attributes[column] } }
+       result
+     end
+
+     def ==(rhs)
+       attributes == rhs.attributes
+     end
+   end
+ end
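QueryResult.create turns a plain attributes hash into a lightweight read-only row object: per unique column set it caches an anonymous subclass and defines one reader per column. For example:

    row = CassandraModel::QueryResult.create(name: 'acme', price: 10)
    row.name                      # => 'acme', reader defined on the cached subclass
    row.attributes                # => {name: 'acme', price: 10}

    other = CassandraModel::QueryResult.create(name: 'zoo', price: 3)
    other.class.equal?(row.class) # => true, same column set reuses the generated class
    row == other                  # => false, equality compares the attribute hashes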
data/lib/cassandra_model/raw_connection.rb
@@ -0,0 +1,163 @@
+ module CassandraModel
+   class RawConnection
+     CLUSTER_MUTEX = Mutex.new
+     SESSION_MUTEX = Mutex.new
+     CONFIG_MUTEX = Mutex.new
+     STATEMENT_MUTEX = Mutex.new
+     REACTOR_MUTEX = Mutex.new
+
+     DEFAULT_CONFIGURATION = {
+         hosts: %w(localhost),
+         keyspace: 'default_keyspace',
+         keyspace_options: {
+             class: 'SimpleStrategy',
+             replication_factor: 1
+         },
+         port: '9042',
+         consistency: :one,
+         connection_timeout: 10,
+         timeout: 10
+     }.freeze
+
+     def initialize(config_name = nil)
+       @config_name = config_name
+       @statement_cache = {}
+     end
+
+     def config=(value)
+       CONFIG_MUTEX.synchronize { @config = DEFAULT_CONFIGURATION.merge(value) }
+     end
+
+     def config
+       safe_getset_variable(CONFIG_MUTEX, :@config) { load_config }
+     end
+
+     def cluster
+       safe_getset_variable(CLUSTER_MUTEX, :@cluster) do
+         connection_configuration = config.slice(:hosts,
+                                                 :compression,
+                                                 :consistency,
+                                                 :connection_timeout, :timeout,
+                                                 :username, :password,
+                                                 :address_resolution)
+         connection_configuration.merge!(logger: Logging.logger)
+         Cassandra.cluster(connection_configuration)
+       end
+     end
+
+     def session
+       safe_getset_variable(SESSION_MUTEX, :@session) { cluster.connect(config[:keyspace]) }
+     end
+
+     def keyspace
+       cluster.keyspace(keyspace_name) || create_keyspace
+     end
+
+     def unlogged_batch_reactor
+       reactor(:@unlogged_reactor, SingleTokenUnloggedBatch)
+     end
+
+     def logged_batch_reactor
+       reactor(:@logged_reactor, SingleTokenLoggedBatch)
+     end
+
+     def counter_batch_reactor
+       reactor(:@counter_reactor, SingleTokenCounterBatch)
+     end
+
+     def statement(query)
+       statement_cache[query] || begin
+         STATEMENT_MUTEX.synchronize { statement_cache[query] ||= session.prepare(query) }
+       end
+     end
+
+     def shutdown
+       @shutdown = true
+       REACTOR_MUTEX.synchronize do
+         @unlogged_reactor.stop.get if @unlogged_reactor
+         @unlogged_reactor = nil
+
+         @logged_reactor.stop.get if @logged_reactor
+         @logged_reactor = nil
+
+         @counter_reactor.stop.get if @counter_reactor
+         @counter_reactor = nil
+       end
+       SESSION_MUTEX.synchronize do
+         @session.close if @session
+         @session = nil
+       end
+       CLUSTER_MUTEX.synchronize do
+         @cluster.close if @cluster
+         @cluster = nil
+       end
+     end
+
+     private
+
+     attr_reader :statement_cache
+
+     def create_keyspace
+       cluster.connect.execute(create_keyspace_query)
+       sleep 0.1 until (keyspace = cluster.keyspace(keyspace_name))
+       keyspace
+     end
+
+     def create_keyspace_query
+       "CREATE KEYSPACE IF NOT EXISTS #{keyspace_name} WITH REPLICATION = #{keyspace_options};"
+     end
+
+     def keyspace_options
+       keyspace_options = config[:keyspace_options].map do |key, value|
+         value = "'#{value}'" if value.is_a?(String)
+         "'#{key}' : #{value}"
+       end * ', '
+       "{ #{keyspace_options} }"
+     end
+
+     def keyspace_name
+       config[:keyspace]
+     end
+
+     def reactor(name, type)
+       safe_getset_variable(REACTOR_MUTEX, name) do
+         BatchReactor.new(cluster, session, type, config[:batch_reactor] || {}).tap do |reactor|
+           reactor.start.get
+         end
+       end
+     end
+
+     def safe_getset_variable(mutex, name, &block)
+       result = instance_variable_get(name)
+       return result if result
+
+       mutex.synchronize do
+         raise Cassandra::Errors::InvalidError.new('Connection invalidated!', 'Dummy') if !!@shutdown
+
+         result = instance_variable_get(name)
+         return result if result
+
+         instance_variable_set(name, block.call)
+       end
+     end
+
+     def load_config
+       if File.exists?(config_path)
+         config = yaml_config || {}
+         DEFAULT_CONFIGURATION.merge(config)
+       else
+         DEFAULT_CONFIGURATION
+       end
+     end
+
+     def yaml_config
+       yaml_config = File.open(config_path) { |file| YAML.load(file.read) }
+       yaml_config = yaml_config[Rails.env] if defined?(Rails)
+       yaml_config
+     end
+
+     def config_path
+       @config_name ? "./config/cassandra/#{@config_name}.yml" : './config/cassandra.yml'
+     end
+   end
+ end
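RawConnection lazily builds the cluster, session, and batch reactors behind per-resource mutexes; configuration comes from ./config/cassandra.yml (or ./config/cassandra/<name>.yml for a named connection, keyed by Rails.env under Rails), merged over DEFAULT_CONFIGURATION, or can be assigned directly. A small sketch of the lifecycle, with illustrative values:

    conn = CassandraModel::RawConnection.new  # nil name => ./config/cassandra.yml
    conn.config = {                           # merged over DEFAULT_CONFIGURATION
        hosts: %w(10.0.0.1 10.0.0.2),         # example values, not defaults
        keyspace: 'my_app',
        consistency: :quorum
    }
    conn.session    # connects lazily; cluster creation is mutex-guarded
    conn.keyspace   # creates the keyspace from keyspace_options if it does not exist
    statement = conn.statement('SELECT * FROM things WHERE id = ?') # prepared and cached ('things' is illustrative)
    conn.shutdown   # stops reactors, closes session and cluster, and blocks further lazy initialization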
data/lib/cassandra_model/record.rb
@@ -0,0 +1,551 @@
+ require_relative 'query_helper'
+ require_relative 'meta_columns'
+
+ module CassandraModel
+   class Record
+     extend CassandraModel::QueryHelper
+     include CassandraModel::MetaColumns
+     include CassandraModel::DisplayableAttributes
+
+     attr_reader :attributes, :valid, :execution_info
+
+     Attributes = Struct.new(
+         :table,
+
+         :columns,
+         :counter_columns,
+
+         :internal_defaults,
+         :composite_columns,
+         :composite_pk_map,
+         :composite_ck_map,
+
+         :composite_partition_key,
+         :composite_clustering_columns,
+         :composite_primary_key,
+
+         :composite_shard_key,
+     ) # Using this instead of OpenStruct, as there seems to be a bug in JRuby that causes this to get mangled over time
+     ConfigureableAttributes = Struct.new(
+         :table_name,
+         :connection_name,
+
+         :write_consistency,
+         :read_consistency,
+
+         :before_save_callbacks,
+
+         :deferred_columns,
+         :deferred_column_readers,
+         :deferred_column_writers,
+         :async_deferred_column_readers,
+         :async_deferred_column_writers,
+
+         :composite_defaults,
+
+         :batch_type,
+
+         :display_attributes,
+     )
+
+     def initialize(attributes = {}, options = {validate: true})
+       ensure_attributes_accessible!
+       validate_attributes!(attributes) if options[:validate]
+       @execution_info = options[:execution_info]
+       @valid = true
+       @attributes = attributes.deep_dup
+       after_initialize
+     end
+
+     def save_async(options = {})
+       internal_save_async(options)
+     end
+
+     def delete_async
+       internal_delete_async
+     end
+
+     def update_async(new_attributes)
+       internal_update_async(new_attributes)
+     end
+
+     def invalidate!
+       @valid = false
+     end
+
+     def save(options = {})
+       save_async(options).get
+     end
+
+     alias :save! :save
+
+     def delete
+       delete_async.get
+     end
+
+     def update(new_attributes)
+       update_async(new_attributes).get
+     end
+
+     def partition_key
+       attributes.slice(*self.class.partition_key)
+     end
+
+     def clustering_columns
+       attributes.slice(*self.class.clustering_columns)
+     end
+
+     def primary_key
+       attributes.slice(*self.class.primary_key)
+     end
+
+     def inspect
+       %Q{#<#{self.class.to_s}#{inspected_validation} #{inspected_attributes}>}
+     end
+
+     alias :to_s :inspect
+
+     def ==(rhs)
+       rhs.respond_to?(:attributes) && @attributes == rhs.attributes
+     end
+
+     private
+
+     def inspected_validation
+       '(Invalidated)' unless valid
+     end
+
+     def inspected_attributes
+       attributes.map do |key, value|
+         %Q{#{key}: "#{value.to_s.truncate(53)}"}
+       end * ', '
+     end
+
+     protected
+
+     def table
+       self.class.table
+     end
+
+     def session
+       table.connection.session
+     end
+
+     def statement(query)
+       table.connection.statement(query)
+     end
+
+     def validate_attributes!(attributes)
+       valid_columns = columns + deferred_columns
+       attributes.keys.each do |column|
+         column = column.key if column.is_a?(ThomasUtils::KeyIndexer)
+         raise "Invalid column '#{column}' specified" unless valid_columns.include?(column)
+       end
+     end
+
+     def internal_delete_async
+       @valid = false
+
+       statement = statement(self.class.query_for_delete)
+       attributes = internal_attributes
+       column_values = table.primary_key.map { |column| attributes[column] }
+
+       future = if batch_reactor
+         execute_async_in_batch(statement, column_values)
+       else
+         session.execute_async(statement, *column_values, write_query_options)
+       end
+       future.then { self }
+     end
+
+     def internal_save_async(options = {})
+       raise 'Cannot save invalidated record!' unless valid
+
+       self.class.before_save_callbacks.map { |proc| instance_eval(&proc) }
+       if !options[:skip_deferred_columns] && (self.class.deferred_column_writers || self.class.async_deferred_column_writers)
+         promise = Cassandra::Future.promise
+         ThomasUtils::Future.new do
+           begin
+             promise.fulfill(save_deferred_columns)
+           rescue Exception => e
+             promise.break(e)
+           end
+         end
+         promise.future.then { save_row_async(options) }.then do |result|
+           @execution_info = result.execution_info
+           execute_callback(:record_saved)
+           self
+         end
+       else
+         save_row_async(options).then do |result|
+           invalidate! if save_rejected?(result)
+           @execution_info = result.execution_info
+           execute_callback(:record_saved)
+           self
+         end
+       end
+     end
+
+     def save_rejected?(result)
+       save_result = result.first
+       save_result && save_result['[applied]'] == false
+     end
+
+     def internal_update_async(new_attributes)
+       validate_attributes!(new_attributes)
+
+       query = self.class.query_for_update(new_attributes)
+       statement = statement(query)
+       attributes = internal_attributes
+       column_values = table.primary_key.map { |column| attributes[column] }
+
+       future = if batch_reactor
+         execute_async_in_batch(statement, new_attributes.values + column_values)
+       else
+         session.execute_async(statement, *new_attributes.values, *column_values, write_query_options)
+       end
+       future.then do
+         self.attributes.merge!(new_attributes)
+         self
+       end
+     end
+
+     def write_query_options(options = {})
+       {}.tap do |new_option|
+         new_option[:consistency] = write_consistency if write_consistency
+         new_option[:trace] = true if options[:trace]
+       end
+     end
+
+     def write_consistency
+       self.class.write_consistency
+     end
+
+     def column_values
+       attributes = internal_attributes
+       internal_columns.map { |column| attributes[column] }
+     end
+
+     def internal_attributes
+       attributes
+     end
+
+     def save_row_async(options)
+       statement = statement(query_for_save(options))
+       future = if batch_reactor
+         execute_async_in_batch(statement, column_values)
+       else
+         session.execute_async(statement, *column_values, write_query_options(options))
+       end
+       future.on_failure do |error|
+         Logging.logger.error("Error saving #{self.class}: #{error}")
+         execute_callback(:save_record_failed, error)
+       end
+     end
+
+     def execute_callback(callback, *extra_params)
+       GlobalCallbacks.call(callback, self, *extra_params)
+     end
+
+     def execute_async_in_batch(statement, column_values)
+       bound_statement = statement.bind(*column_values)
+       batch_reactor.perform_within_batch(bound_statement) do |batch|
+         batch.add(bound_statement)
+         batch
+       end
+     end
+
+     def batch_reactor
+       if self.class.batch_type == :logged
+         table.connection.logged_batch_reactor
+       elsif self.class.batch_type == :unlogged
+         table.connection.unlogged_batch_reactor
+       elsif self.class.batch_type == :counter
+         table.connection.counter_batch_reactor
+       end
+     end
+
+     def save_deferred_columns
+       self.class.save_deferred_columns(self)
+       deferred_column_futures = self.class.save_async_deferred_columns(self)
+       deferred_column_futures.map(&:get) if deferred_column_futures
+     end
+
+     def query_for_save(options)
+       self.class.query_for_save(options)
+     end
+
+     def columns
+       self.class.columns
+     end
+
+     alias :ensure_attributes_accessible! :columns
+
+     def internal_columns
+       self.class.internal_columns
+     end
+
+     def shard_key
+       self.class.shard_key
+     end
+
+     def column_hash(hashing_column)
+       Digest::MD5.hexdigest(attributes[hashing_column].to_s).unpack('L').first
+     end
+
+     class << self
+       extend Forwardable
+
+       def_delegator :table, :partition_key, :internal_partition_key
+       def_delegator :table, :clustering_columns, :internal_clustering_columns
+       def_delegator :table, :primary_key, :internal_primary_key
+       def_delegator :table, :name, :table_name
+       def_delegator :table, :columns, :internal_columns
+       def_delegators :table_config, :write_consistency, :read_consistency, :write_consistency=, :read_consistency=
+
+       alias :partition_key :internal_partition_key
+       alias :clustering_columns :internal_clustering_columns
+       alias :primary_key :internal_primary_key
+
+       def table_name=(value)
+         table_config.table_name = value
+       end
+
+       def connection_name=(value)
+         table_config.connection_name = value
+       end
+
+       def table=(value)
+         table_data.table = value
+       end
+
+       def table
+         table_data.table ||= begin
+           table_name = table_config.table_name || generate_table_name
+           TableRedux.new(table_config.connection_name, table_name)
+         end
+       end
+
+       def save_in_batch(type)
+         table_config.batch_type = type
+       end
+
+       def batch_type
+         table_config.batch_type
+       end
+
+       def columns
+         table_data.columns ||= internal_columns.tap do |columns|
+           columns.each { |column| define_attribute(column) }
+         end
+       end
+
+       def query_for_save(options = {})
+         existence_clause = options[:check_exists] && ' IF NOT EXISTS'
+         column_names = internal_columns.join(', ')
+         column_sanitizers = (%w(?) * internal_columns.size).join(', ')
+         save_query = "INSERT INTO #{table_name} (#{column_names}) VALUES (#{column_sanitizers})"
+         "#{save_query}#{existence_clause}"
+       end
+
+       def query_for_delete
+         where_clause = table.primary_key.map { |column| "#{column} = ?" }.join(' AND ')
+         "DELETE FROM #{table_name} WHERE #{where_clause}"
+       end
+
+       def query_for_update(new_attributes)
+         where_clause = table.primary_key.map { |column| "#{column} = ?" }.join(' AND ')
+         set_clause = new_attributes.keys.map { |column| "#{column} = ?" }.join(', ')
+         "UPDATE #{table_name} SET #{set_clause} WHERE #{where_clause}"
+       end
+
+       def create_async(attributes, options = {})
+         self.new(attributes).save_async(options)
+       end
+
+       def create(attributes, options = {})
+         create_async(attributes, options).get
+       end
+
+       alias :create! :create
+
+       def request_async(clause, options = {})
+         page_size = options[:page_size]
+         trace = options[:trace]
+         request_query, invalidated_result, where_values = request_meta(clause, options)
+         statement = statement(request_query)
+
+         query_options = {}
+         query_options[:page_size] = page_size if page_size
+         query_options[:consistency] = read_consistency if read_consistency
+         query_options[:trace] = trace if trace
+
+         future = session.execute_async(statement, *where_values, query_options)
+         if options[:limit] == 1
+           single_result_row_future(future, invalidated_result)
+         else
+           paginator_result_future(future, invalidated_result)
+         end
+       end
+
+       def request_meta(clause, options)
+         where_clause, where_values = where_params(clause)
+         select_clause, use_query_result = select_params(options)
+         order_by_clause = order_by_clause(options[:order_by])
+         limit_clause = limit_clause(options)
+         request_query = "SELECT #{select_clause} FROM #{table_name}#{where_clause}#{order_by_clause}#{limit_clause}"
+         [request_query, use_query_result, where_values]
+       end
+
+       def order_by_clause(order_by)
+         " ORDER BY #{multi_csv_clause(order_by)}" if order_by
+       end
+
+       def first_async(clause = {}, options = {})
+         request_async(clause, options.merge(limit: 1))
+       end
+
+       def request(clause, options = {})
+         request_async(clause, options).get
+       end
+
+       def first(clause = {}, options = {})
+         first_async(clause, options).get
+       end
+
+       def shard(hashing_column = nil, max_shard = nil, &block)
+         if hashing_column
+           if block_given?
+             hashing_shard(hashing_column, &block)
+           else
+             modulo_shard(hashing_column, max_shard)
+           end
+         else
+           manual_shard(&block)
+         end
+       end
+
+       def before_save(&block)
+         before_save_callbacks << block
+       end
+
+       def before_save_callbacks
+         table_config.before_save_callbacks ||= []
+       end
+
+       def shard_key
+         partition_key.last
+       end
+
+       protected
+
+       def table_data
+         @table_data ||= Attributes.new
+       end
+
+       def table_config
+         @table_config ||= ConfigureableAttributes.new
+       end
+
+       def session
+         table.connection.session
+       end
+
+       def statement(query)
+         table.connection.statement(query)
+       end
+
+       def generate_table_name
+         self.name.demodulize.underscore.pluralize
+       end
+
+       def define_attribute(column)
+         define_method(:"#{column}=") { |value| self.attributes[column] = value }
+         define_method(column.to_sym) { self.attributes[column] }
+       end
+
+       def paginator_result_future(future, invalidated_result)
+         ResultPaginator.new(future) { |row, execution_info| record_from_result(row, execution_info, invalidated_result) }
+       end
+
+       def single_result_row_future(future, invalidated_result)
+         future.then do |rows|
+           record_from_result(rows.first, rows.execution_info, invalidated_result) if rows.first
+         end
+       end
+
+       def limit_clause(options)
+         limit = options[:limit]
+         if limit
+           integer_limit = limit.to_i
+           raise "Invalid limit '#{limit}'" if integer_limit < 1
+           " LIMIT #{integer_limit}"
+         end
+       end
+
+       def select_params(options)
+         select = options[:select]
+         [select_clause(select), !!select]
+       end
+
+       def select_clause(select)
+         select ? multi_csv_clause(select) : '*'
+       end
+
+       def multi_csv_clause(select)
+         select.is_a?(Array) ? select.join(', ') : select
+       end
+
+       def where_params(clause)
+         where_clause = where_clause(clause) if clause.size > 0
+         where_values = clause.values.flatten(1)
+         [where_clause, where_values]
+       end
+
+       def where_clause(clause)
+         restriction = clause.map do |key, value|
+           if key.is_a?(ThomasUtils::KeyComparer)
+             value.is_a?(Array) ? "#{key} (#{array_value_param_splat(value)})" : "#{key} ?"
+           else
+             value.is_a?(Array) ? multi_value_restriction(key, value) : single_value_restriction(key)
+           end
+         end.join(' AND ')
+         " WHERE #{restriction}"
+       end
+
+       def single_value_restriction(key)
+         "#{key} = ?"
+       end
+
+       def multi_value_restriction(key, value)
+         "#{key} IN (#{array_value_param_splat(value)})"
+       end
+
+       def array_value_param_splat(value)
+         (%w(?) * value.count) * ', '
+       end
+
+       def record_from_result(row, execution_info, invalidate_result)
+         attributes = row_attributes(row)
+         new(attributes, execution_info: execution_info).tap { |result| result.invalidate! if invalidate_result }
+       end
+
+       def row_attributes(row)
+         row.symbolize_keys
+       end
+
+       def manual_shard(&block)
+         before_save { attributes[shard_key] = instance_eval(&block) }
+       end
+
+       def modulo_shard(hashing_column, max_shard)
+         before_save { attributes[shard_key] = (column_hash(hashing_column) % max_shard) }
+       end
+
+       def hashing_shard(hashing_column)
+         before_save { attributes[shard_key] = (yield column_hash(hashing_column)) }
+       end
+
+     end
+   end
+ end
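Taken together, a model is a Record subclass: the table name defaults to the demodulized, underscored, pluralized class name, writes go through prepared INSERT/UPDATE/DELETE statements (optionally routed through a batch reactor), and reads come from QueryHelper. A hedged sketch; the Product class and its column names are assumptions about an existing table:

    class Product < CassandraModel::Record
      self.table_name = 'products'      # optional: 'products' would be generated anyway
      self.write_consistency = :quorum  # applied via write_query_options on every write

      # Fill the last partition-key column (the "shard key") from an MD5 hash of :name,
      # bucketed modulo 16, just before each save.
      shard(:name, 16)

      before_save { attributes[:updated_at] = Time.now } # assumes an updated_at column
    end

    product = Product.new(name: 'widget', price: 10.0)  # unknown columns raise here
    product.save_async                                   # returns a future; #save waits on it
    product.update(price: 12.0)                          # UPDATE ... WHERE <primary key>
    Product.where(name: 'widget').first
    product.delete                                       # also invalidates the record locally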