cequel 1.10.0 → 2.0.0

Files changed (40)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +11 -0
  3. data/Gemfile +1 -0
  4. data/Gemfile.lock +93 -65
  5. data/README.md +26 -5
  6. data/Vagrantfile +2 -2
  7. data/lib/cequel/errors.rb +2 -0
  8. data/lib/cequel/instrumentation.rb +5 -4
  9. data/lib/cequel/metal/batch.rb +21 -18
  10. data/lib/cequel/metal/data_set.rb +17 -28
  11. data/lib/cequel/metal/inserter.rb +3 -2
  12. data/lib/cequel/metal/keyspace.rb +56 -33
  13. data/lib/cequel/metal/request_logger.rb +22 -8
  14. data/lib/cequel/metal/row_specification.rb +9 -8
  15. data/lib/cequel/metal/statement.rb +23 -7
  16. data/lib/cequel/metal/updater.rb +12 -10
  17. data/lib/cequel/metal/writer.rb +5 -13
  18. data/lib/cequel/record/association_collection.rb +6 -33
  19. data/lib/cequel/record/collection.rb +2 -1
  20. data/lib/cequel/record/errors.rb +6 -0
  21. data/lib/cequel/record/persistence.rb +2 -2
  22. data/lib/cequel/record/record_set.rb +3 -4
  23. data/lib/cequel/record/validations.rb +5 -5
  24. data/lib/cequel/schema/table.rb +3 -5
  25. data/lib/cequel/schema/table_reader.rb +73 -111
  26. data/lib/cequel/schema/table_updater.rb +9 -15
  27. data/lib/cequel/version.rb +1 -1
  28. data/spec/examples/metal/data_set_spec.rb +34 -46
  29. data/spec/examples/metal/keyspace_spec.rb +8 -6
  30. data/spec/examples/record/associations_spec.rb +8 -18
  31. data/spec/examples/record/persistence_spec.rb +6 -6
  32. data/spec/examples/record/record_set_spec.rb +39 -12
  33. data/spec/examples/record/timestamps_spec.rb +12 -5
  34. data/spec/examples/schema/keyspace_spec.rb +13 -37
  35. data/spec/examples/schema/table_reader_spec.rb +4 -1
  36. data/spec/examples/schema/table_updater_spec.rb +22 -7
  37. data/spec/examples/schema/table_writer_spec.rb +2 -3
  38. data/spec/examples/spec_helper.rb +1 -0
  39. data/spec/examples/spec_support/preparation_spec.rb +14 -7
  40. metadata +7 -8
@@ -45,7 +45,8 @@ module Cequel
       # @see DataSet#list_prepend
       #
       def list_prepend(column, elements)
-        statements << "#{column} = [?] + #{column}"
+        elements = Array(elements)
+        statements << "#{column} = ? + #{column}"
         bind_vars << elements
       end
 
@@ -59,7 +60,8 @@ module Cequel
       # @see DataSet#list_append
       #
       def list_append(column, elements)
-        statements << "#{column} = #{column} + [?]"
+        elements = Array(elements)
+        statements << "#{column} = #{column} + ?"
        bind_vars << elements
       end
 
@@ -73,7 +75,8 @@ module Cequel
       # @see DataSet#list_remove
       #
       def list_remove(column, value)
-        statements << "#{column} = #{column} - [?]"
+        value = Array(value)
+        statements << "#{column} = #{column} - ?"
        bind_vars << value
       end
 
@@ -102,8 +105,8 @@ module Cequel
       # @see DataSet#set_add
       #
       def set_add(column, values)
-        statements << "#{column} = #{column} + {?}"
-        bind_vars << values
+        statements << "#{column} = #{column} + ?"
+        bind_vars << Set.new(::Kernel.Array(values))
       end
 
       #
@@ -116,8 +119,8 @@ module Cequel
       # @see DataSet#set_remove
       #
       def set_remove(column, values)
-        statements << "#{column} = #{column} - {?}"
-        bind_vars << ::Kernel.Array(values)
+        statements << "#{column} = #{column} - ?"
+        bind_vars << Set.new(::Kernel.Array(values))
       end
 
       #
@@ -130,9 +133,8 @@ module Cequel
       # @see DataSet#map_update
       #
       def map_update(column, updates)
-        binding_pairs = ::Array.new(updates.length) { '?:?' }.join(',')
-        statements << "#{column} = #{column} + {#{binding_pairs}}"
-        bind_vars.concat(updates.flatten)
+        statements << "#{column} = #{column} + ?"
+        bind_vars << updates
       end
 
       private
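Net effect of the updater changes above: collection arguments are no longer expanded into inline `[?]`, `{?}`, or `?:?` placeholders; each collection is coerced to the matching Ruby type and bound as a single value, leaving serialization to the bundled native driver. A rough sketch of the resulting internal state (the column names are hypothetical; the comments describe `statements`/`bind_vars` exactly as the methods above build them):

    updater.list_append(:categories, 'cequel')
    # statements: ["categories = categories + ?"]
    # bind_vars:  [["cequel"]]                    # Array('cequel')

    updater.set_add(:tags, 'ruby')
    # statements: [..., "tags = tags + ?"]
    # bind_vars:  [..., #<Set: {"ruby"}>]         # Set.new(Kernel.Array('ruby'))

    updater.map_update(:shares, 'facebook' => 42)
    # statements: [..., "shares = shares + ?"]
    # bind_vars:  [..., {"facebook" => 42}]       # hash bound as-is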
@@ -13,6 +13,8 @@ module Cequel
     class Writer
       extend Util::Forwardable
 
+      attr_accessor :type_hints
+
       #
       # @param data_set [DataSet] data set to write to
       #
@@ -41,8 +43,8 @@ module Cequel
         consistency = options.fetch(:consistency, data_set.query_consistency)
         write_to_statement(statement, options)
         statement.append(*data_set.row_specifications_cql)
-        data_set.write_with_consistency(
-          statement.cql, statement.bind_vars, consistency)
+        data_set.write_with_options(statement,
+                                    consistency: consistency)
       end
 
       private
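Writes now hand the whole `Statement` object to the data set together with an options hash instead of passing raw CQL text, bind variables, and a bare consistency argument. A hedged sketch of the call shape (`write_with_options` is presumably defined on `DataSet` alongside this change, outside this excerpt):

    data_set.write_with_options(statement, consistency: :quorum)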
@@ -52,17 +54,7 @@ module Cequel
       def_delegator :statements, :empty?
 
       def prepare_upsert_value(value)
-        case value
-        when ::Array
-          yield '[?]', value
-        when ::Set then
-          yield '{?}', value.to_a
-        when ::Hash then
-          binding_pairs = ::Array.new(value.length) { '?:?' }.join(',')
-          yield "{#{binding_pairs}}", *value.flatten
-        else
-          yield '?', value
-        end
+        yield '?', value
       end
 
       #
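With the native driver serializing Ruby `Array`, `Set`, and `Hash` values itself, every upsert value now gets a single `?` placeholder. Illustrative only (a hypothetical `posts` data set on an already-connected keyspace; the CQL shown is the shape this simplified writer produces):

    posts = keyspace[:posts]
    posts.insert(permalink: 'cequel', tags: Set['cql', 'ruby'])
    # INSERT INTO posts (permalink, tags) VALUES (?, ?)
    # bound with ["cequel", #<Set: {"cql", "ruby"}>]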
@@ -50,41 +50,14 @@ module Cequel
       end
 
       #
-      # @!method count
-      #   Get the count of child records stored in the database. This method
-      #   will always query Cassandra, even if the records are loaded in
-      #   memory.
+      # @raise [DangerousQueryError] to prevent loading the entire record set
+      #   to be counted
       #
-      #   @return [Integer] number of child records in the database
-      #   @see #size
-      #   @see #length
-      #
-      def_delegator :record_set, :count
-
-      #
-      # @!method length
-      #   The number of child instances in the in-memory collection. If the
-      #   records are not loaded in memory, they will be loaded and then
-      #   counted.
-      #
-      #   @return [Integer] length of the loaded record collection in memory
-      #   @see #size
-      #   @see #count
-      #
-      def_delegator :entries, :length
-
-      #
-      # Get the size of the child collection. If the records are loaded in
-      # memory from a previous operation, count the length of the array in
-      # memory. If the collection is unloaded, perform a `COUNT` query.
-      #
-      # @return [Integer] size of the child collection
-      # @see #length
-      # @see #count
-      #
-      def size
-        loaded? ? length : count
+      def count
+        raise Cequel::Record::DangerousQueryError.new
       end
+      alias_method :length, :count
+      alias_method :size, :count
 
       #
       # @return [Boolean] true if this collection's records are loaded in
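For callers this means counting an association is now an explicit decision rather than an implicit query. A hedged sketch (the blog/posts association is hypothetical):

    blog.posts.count          # raises Cequel::Record::DangerousQueryError in 2.0.0
    blog.posts.to_a.length    # deliberately loads the collection, then counts in memory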
@@ -332,7 +332,8 @@ module Cequel
       #
       def unshift(*objects)
         objects.map!(&method(:cast_element))
-        to_update { updater.list_prepend(column_name, objects.reverse) }
+        prepared = @model.class.connection.bug8733_version? ? objects.reverse : objects
+        to_update { updater.list_prepend(column_name, prepared) }
         to_modify { super }
       end
       alias_method :prepend, :unshift
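`bug8733_version?` (added to `Keyspace` elsewhere in this release) presumably reports whether the connected server is still affected by CASSANDRA-8733, where prepended list elements come back in reverse order; on those versions the elements are pre-reversed so the observable result is the same either way. Sketch of the intended behaviour (model and column are hypothetical):

    post.categories = ['two', 'three']
    post.categories.unshift('zero', 'one')
    post.save!
    post.reload.categories  # => ["zero", "one", "two", "three"] on either server family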
@@ -44,6 +44,12 @@ module Cequel
     #
     IllegalQuery = Class.new(StandardError)
 
+    #
+    # Raised when attempting to perform a query that has detrimental effects.
+    # Typically when trying to count records.
+    #
+    DangerousQueryError = Class.new(StandardError)
+
     #
     # Raised when attempting to persist a Cequel::Record without defining all
     # primary key columns
@@ -274,8 +274,8 @@ module Cequel
 
       def create(options = {})
         assert_keys_present!
-        metal_scope
-          .insert(attributes.reject { |attr, value| value.nil? }, options)
+        attributes_for_write = attributes.reject { |attr, value| value.nil? }
+        metal_scope.insert(attributes_for_write, options)
         loaded!
         persisted!
       end
@@ -537,11 +537,10 @@ module Cequel
         end
       end
 
-      #
-      # @return [Integer] the total number of records in this record set
-      #
+      # @raise [DangerousQueryError] to prevent loading the entire record set
+      #   to be counted
       def count
-        data_set.count
+        raise Cequel::Record::DangerousQueryError.new
       end
       alias_method :length, :count
       alias_method :size, :count
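Code that previously relied on `count`/`size`/`length` on a record set has to opt in to a full load (or maintain its own counter) instead. A hedged sketch (the Post model is hypothetical):

    begin
      Post.count
    rescue Cequel::Record::DangerousQueryError
      Post.all.to_a.size    # explicit, intentional load of every record
    end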
@@ -26,7 +26,7 @@ module Cequel
       included do
         include ActiveModel::Validations
         define_model_callbacks :validation
-        alias_method_chain :valid?, :callbacks
+        prepend Callback
       end
 
       #
@@ -80,11 +80,11 @@ module Cequel
         self.attributes = attributes
         save!
       end
+    end
 
-      private
-
-      def valid_with_callbacks?(context=nil)
-        run_callbacks(:validation) { valid_without_callbacks? context }
+    module Callback
+      def valid?(context=nil)
+        run_callbacks(:validation) { super context }
       end
     end
   end
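`alias_method_chain` has been removed from recent ActiveSupport, so validation callbacks are now wired in with `Module#prepend`. A minimal, standalone sketch of the same pattern (the names here are illustrative, not Cequel's real classes):

    module Callbacks
      def valid?(context = nil)
        puts 'running validation callbacks'   # stand-in for run_callbacks(:validation)
        super(context)
      end
    end

    class Model
      prepend Callbacks

      def valid?(_context = nil)
        true
      end
    end

    Model.new.valid?  # prints the message, then returns true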
@@ -9,11 +9,6 @@ module Cequel
     # @see Keyspace#read_table
     #
     class Table
-      STORAGE_PROPERTIES = %w(
-        bloom_filter_fp_chance caching comment compaction compression
-        dclocal_read_repair_chance gc_grace_seconds read_repair_chance
-        replicate_on_write
-      )
 
       # @return [Symbol] the name of the table
       attr_reader :name
@@ -293,8 +288,11 @@ module Cequel
       end
 
       def type(type)
+        type = type.kind if type.respond_to?(:kind)
+
         ::Cequel::Type[type]
       end
+
     end
   end
 end
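The bundled cassandra-driver describes column types as `Cassandra::Types` objects rather than plain symbols; when such an object is passed in, its `kind` (a symbol like `:varchar`) is used for the lookup. Roughly, assuming the driver's type API behaves as described (`type` is the private helper above, shown here only as a sketch):

    type(:varchar)                  # => Cequel::Type[:varchar], as before
    type(Cassandra::Types.varchar)  # responds to #kind, so it resolves the same way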
@@ -54,6 +54,7 @@ module Cequel
           read_clustering_columns
           read_data_columns
           read_properties
+          read_table_settings
           table
         end
       end
@@ -64,142 +65,103 @@ module Cequel
 
       private
 
-      # XXX This gets a lot easier in Cassandra 2.0: all logical columns
-      # (including keys) are returned from the `schema_columns` query, so
-      # there's no need to jump through all these hoops to figure out what the
-      # key columns look like.
-      #
-      # However, this approach works for both 1.2 and 2.0, so better to keep it
-      # for now. It will be worth refactoring this code to take advantage of
-      # 2.0's better interface in a future version of Cequel that targets 2.0+.
       def read_partition_keys
-        validators = table_data['key_validator']
-        types = parse_composite_types(validators) || [validators]
-        columns = partition_columns.sort_by { |c| c['component_index'] }
-          .map { |c| c['column_name'] }
-
-        columns.zip(types) do |name, type|
-          table.add_partition_key(name.to_sym, Type.lookup_internal(type))
+        table_data.partition_key.each do |k|
+          table.add_partition_key(k.name.to_sym, k.type)
         end
+
       end
 
-      # XXX See comment on {read_partition_keys}
       def read_clustering_columns
-        columns = cluster_columns.sort { |l, r| l['component_index'] <=> r['component_index'] }
-          .map { |c| c['column_name'] }
-        comparators = parse_composite_types(table_data['comparator'])
-        unless comparators
-          table.compact_storage = true
-          return unless column_data.empty?
-          columns << :column1 if cluster_columns.empty?
-          comparators = [table_data['comparator']]
-        end
-
-        columns.zip(comparators) do |name, type|
-          if REVERSED_TYPE_PATTERN =~ type
-            type = $1
-            clustering_order = :desc
+        table_data.clustering_columns.zip(table_data.clustering_order)
+          .each do |c,o|
+            table.add_clustering_column(c.name.to_sym, c.type, o)
           end
-          table.add_clustering_column(
-            name.to_sym,
-            Type.lookup_internal(type),
-            clustering_order
-          )
-        end
       end
 
       def read_data_columns
-        if column_data.empty?
-          table.add_data_column(
-            (compact_value['column_name'] || :value).to_sym,
-            Type.lookup_internal(table_data['default_validator']),
-            false
-          )
-        else
-          column_data.each do |result|
-            if COLLECTION_TYPE_PATTERN =~ result['validator']
-              read_collection_column(
-                result['column_name'],
-                $1.underscore,
-                *$2.split(',')
-              )
+        indexes = Hash[table_data.each_index.map{|i| [i.target, i.name]}]
+
+        ((table_data.each_column - table_data.partition_key) - table_data.clustering_columns)
+          .each do |c|
+            next if table.column(c.name.to_sym)
+            case c.type
+            when Cassandra::Types::Simple
+              opts = if indexes[c.name]
+                       {index: indexes[c.name].to_sym}
+                     else
+                       {}
+                     end
+              table.add_data_column(c.name.to_sym, c.type, opts)
+            when Cassandra::Types::List
+              table.add_list(c.name.to_sym, c.type.value_type)
+            when Cassandra::Types::Set
+              table.add_set(c.name.to_sym, c.type.value_type)
+            when Cassandra::Types::Map
+              table.add_map(c.name.to_sym, c.type.key_type, c.type.value_type)
             else
-              table.add_data_column(
-                result['column_name'].to_sym,
-                Type.lookup_internal(result['validator']),
-                result['index_name'].try(:to_sym)
-              )
+              fail "Unsupported type #{c.type.inspect}"
             end
           end
-        end
       end
 
-      def read_collection_column(name, collection_type, *internal_types)
-        types = internal_types
-          .map { |internal| Type.lookup_internal(internal) }
-        table.__send__("add_#{collection_type}", name.to_sym, *types)
-      end
+      @@prop_extractors = []
+      def self.def_property(name,
+                            option_method = name,
+                            coercion = ->(val, _table_data){ val })
 
-      def read_properties
-        table_data.slice(*Table::STORAGE_PROPERTIES).each do |name, value|
-          table.add_property(name, value)
-        end
-        compaction = JSON.parse(table_data['compaction_strategy_options'])
-          .symbolize_keys
-        compaction[:class] = table_data['compaction_strategy_class']
-        table.add_property(:compaction, compaction)
-        compression = JSON.parse(table_data['compression_parameters'])
-        table.add_property(:compression, compression)
-      end
+        @@prop_extractors << ->(table, table_data) {
+          raw_prop_val = table_data.options.public_send(option_method)
+          prop_val = coercion.call(raw_prop_val,table_data)
 
-      def parse_composite_types(type_string)
-        if COMPOSITE_TYPE_PATTERN =~ type_string
-          $1.split(',')
-        end
+          table.add_property(name, prop_val)
+        }
       end
 
-      def table_data
-        return @table_data if defined? @table_data
-        table_query = keyspace.execute(<<-CQL, keyspace.name, table_name)
-          SELECT * FROM system.schema_columnfamilies
-          WHERE keyspace_name = ? AND columnfamily_name = ?
-        CQL
-        @table_data = table_query.first.try(:to_hash)
-      end
+      def_property("bloom_filter_fp_chance")
+      def_property("caching")
+      def_property("comment")
+      def_property("local_read_repair_chance")
+      def_property("dclocal_read_repair_chance", :local_read_repair_chance)
+      def_property("compression", :compression,
+                   ->(comp, table_data) {
+                     comp.clone.tap { |r|
+                       r["chunk_length_kb"] ||= r["chunk_length_in_kb"] if r["chunk_length_in_kb"]
+                       r["crc_check_chance"] ||= table_data.options.crc_check_chance
+                     }
+                   })
+      def_property("compaction", :compaction_strategy,
+                   ->(compaction_strategy, _table_data) {
+                     compaction_strategy.options
+                       .merge(class: compaction_strategy.class_name)
+                   })
+      def_property("gc_grace_seconds")
+      def_property("read_repair_chance")
+      def_property("replicate_on_write", :replicate_on_write?)
 
-      def all_columns
-        @all_columns ||=
-          if table_data
-            column_query = keyspace.execute(<<-CQL, keyspace.name, table_name)
-              SELECT * FROM system.schema_columns
-              WHERE keyspace_name = ? AND columnfamily_name = ?
-            CQL
-            column_query.map(&:to_hash)
-          end
+      def read_properties
+        @@prop_extractors.each do |extractor|
+          extractor.call(table, table_data)
+        end
       end
 
-      def compact_value
-        @compact_value ||= all_columns.find do |column|
-          column['type'] == 'compact_value'
-        end || {}
+      def read_table_settings
+        table.compact_storage = table_data.options.compact_storage?
       end
 
-      def column_data
-        @column_data ||= all_columns.select do |column|
-          !column.key?('type') || column['type'] == 'regular'
-        end
-      end
+      def table_data
+        @table_data ||=
+          begin
+            cluster = keyspace.cluster
+            cluster.refresh_schema
 
-      def partition_columns
-        @partition_columns ||= all_columns.select do |column|
-          column['type'] == 'partition_key'
-        end
-      end
+            fail(NoSuchKeyspaceError, "No such keyspace #{keyspace.name}") if
+              !cluster.has_keyspace?(keyspace.name)
 
-      def cluster_columns
-        @cluster_columns ||= all_columns.select do |column|
-          column['type'] == 'clustering_key'
-        end
+            cluster
+              .keyspace(keyspace.name)
+              .table(table_name.to_s)
+          end
       end
     end
   end
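The reader no longer queries `system.schema_columnfamilies`/`system.schema_columns` by hand; it leans on the schema metadata the bundled driver keeps per cluster, and table properties are captured declaratively through the `def_property` extractors. A standalone sketch of that extractor pattern (the class and option names here are illustrative, not Cequel's real ones):

    require 'ostruct'

    class PropertyReader
      @@extractors = []

      # Register a lambda that copies one option from the schema metadata into
      # the table description, optionally renaming or coercing it on the way.
      def self.def_property(name, option_method = name, coercion = ->(val, _data) { val })
        @@extractors << lambda do |table, table_data|
          raw = table_data.options.public_send(option_method)
          table[name] = coercion.call(raw, table_data)
        end
      end

      def_property('comment')
      def_property('gc_grace_seconds')

      def self.read_properties(table, table_data)
        @@extractors.each { |extractor| extractor.call(table, table_data) }
        table
      end
    end

    table_data = OpenStruct.new(
      options: OpenStruct.new(comment: 'users table', gc_grace_seconds: 864_000)
    )
    PropertyReader.read_properties({}, table_data)
    # => {"comment"=>"users table", "gc_grace_seconds"=>864000}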