cequel 1.0.0.rc1 → 1.0.0.rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. checksums.yaml +4 -4
  2. data/lib/cequel.rb +18 -0
  3. data/lib/cequel/errors.rb +8 -4
  4. data/lib/cequel/metal.rb +14 -0
  5. data/lib/cequel/metal/batch.rb +21 -11
  6. data/lib/cequel/metal/batch_manager.rb +74 -0
  7. data/lib/cequel/metal/cql_row_specification.rb +19 -6
  8. data/lib/cequel/metal/data_set.rb +400 -163
  9. data/lib/cequel/metal/deleter.rb +45 -11
  10. data/lib/cequel/metal/incrementer.rb +23 -10
  11. data/lib/cequel/metal/inserter.rb +19 -6
  12. data/lib/cequel/metal/keyspace.rb +82 -159
  13. data/lib/cequel/metal/logger.rb +71 -0
  14. data/lib/cequel/metal/logging.rb +47 -0
  15. data/lib/cequel/metal/new_relic_instrumentation.rb +26 -0
  16. data/lib/cequel/metal/row.rb +36 -10
  17. data/lib/cequel/metal/row_specification.rb +21 -8
  18. data/lib/cequel/metal/statement.rb +30 -6
  19. data/lib/cequel/metal/updater.rb +89 -12
  20. data/lib/cequel/metal/writer.rb +23 -14
  21. data/lib/cequel/record.rb +52 -6
  22. data/lib/cequel/record/association_collection.rb +13 -6
  23. data/lib/cequel/record/associations.rb +146 -54
  24. data/lib/cequel/record/belongs_to_association.rb +34 -7
  25. data/lib/cequel/record/bound.rb +69 -12
  26. data/lib/cequel/record/bulk_writes.rb +29 -1
  27. data/lib/cequel/record/callbacks.rb +22 -6
  28. data/lib/cequel/record/collection.rb +273 -36
  29. data/lib/cequel/record/configuration_generator.rb +5 -0
  30. data/lib/cequel/record/data_set_builder.rb +86 -0
  31. data/lib/cequel/record/dirty.rb +11 -8
  32. data/lib/cequel/record/errors.rb +38 -4
  33. data/lib/cequel/record/has_many_association.rb +42 -9
  34. data/lib/cequel/record/lazy_record_collection.rb +39 -10
  35. data/lib/cequel/record/mass_assignment.rb +14 -6
  36. data/lib/cequel/record/persistence.rb +157 -20
  37. data/lib/cequel/record/properties.rb +147 -24
  38. data/lib/cequel/record/railtie.rb +15 -2
  39. data/lib/cequel/record/record_set.rb +504 -75
  40. data/lib/cequel/record/schema.rb +77 -13
  41. data/lib/cequel/record/scoped.rb +16 -11
  42. data/lib/cequel/record/secondary_indexes.rb +42 -6
  43. data/lib/cequel/record/tasks.rb +2 -1
  44. data/lib/cequel/record/validations.rb +51 -11
  45. data/lib/cequel/schema.rb +9 -0
  46. data/lib/cequel/schema/column.rb +172 -33
  47. data/lib/cequel/schema/create_table_dsl.rb +62 -31
  48. data/lib/cequel/schema/keyspace.rb +106 -7
  49. data/lib/cequel/schema/migration_validator.rb +128 -0
  50. data/lib/cequel/schema/table.rb +183 -20
  51. data/lib/cequel/schema/table_property.rb +92 -34
  52. data/lib/cequel/schema/table_reader.rb +45 -15
  53. data/lib/cequel/schema/table_synchronizer.rb +101 -43
  54. data/lib/cequel/schema/table_updater.rb +114 -19
  55. data/lib/cequel/schema/table_writer.rb +31 -13
  56. data/lib/cequel/schema/update_table_dsl.rb +71 -40
  57. data/lib/cequel/type.rb +214 -53
  58. data/lib/cequel/util.rb +6 -9
  59. data/lib/cequel/version.rb +2 -1
  60. data/spec/examples/record/associations_spec.rb +12 -12
  61. data/spec/examples/record/persistence_spec.rb +5 -5
  62. data/spec/examples/record/record_set_spec.rb +62 -50
  63. data/spec/examples/schema/table_synchronizer_spec.rb +37 -11
  64. data/spec/examples/schema/table_updater_spec.rb +3 -3
  65. data/spec/examples/spec_helper.rb +2 -11
  66. data/spec/examples/type_spec.rb +3 -3
  67. metadata +23 -4
  68. data/lib/cequel/new_relic_instrumentation.rb +0 -22
data/lib/cequel/schema/table_property.rb

@@ -1,36 +1,75 @@
 module Cequel
-
   module Schema
-
+    #
+    # Encapsulates a CQL3 storage property defined on a table
+    #
     class TableProperty
+      # @return [Symbol] name of the property
+      attr_reader :name
+      # @return value of the property
+      attr_reader :value
 
-      attr_reader :name, :value
+      #
+      # Initialize an instance of the appropriate TableProperty implementation.
+      #
+      # @param (see #initialize)
+      # @api private
+      #
+      def self.build(name, value)
+        clazz =
+          case name.to_sym
+          when :compaction then CompactionProperty
+          when :compression then CompressionProperty
+          else TableProperty
+          end
+        clazz.new(name, value)
+      end
 
+      #
+      # @param name [Symbol] name of the property
+      # @param value value of the property
+      #
       def initialize(name, value)
         @name = name
-        set_normalized_value(value)
+        self.normalized_value = value
       end
+      class << self; protected :new; end
 
+      #
+      # @return [String] CQL fragment defining this property in a `CREATE
+      #   TABLE` statement
+      #
       def to_cql
-        if Hash === @value
-          map_pairs = @value.each_pair.
-            map { |key, value| "#{quote(key.to_s)} : #{quote(value)}" }.
-            join(', ')
-          value_cql = "{ #{map_pairs} }"
-        else
-          value_cql = quote(@value)
-        end
         "#{@name} = #{value_cql}"
       end
 
+      protected
+
+      def normalized_value=(value)
+        @value = value
+      end
+
       private
 
+      def value_cql
+        quote(@value)
+      end
+
       def quote(value)
         CassandraCQL::Statement.quote(value)
       end
+    end
+
+    #
+    # A table property whose value is itself a map of keys and values
+    #
+    # @abstract Inheriting classes must implement
+    #   `#normalize_map_property(key, value)`
+    #
+    class MapProperty < TableProperty
+      protected
 
-      def set_normalized_value(map)
-        return @value = map unless Hash === map
+      def normalized_value=(map)
         @value = {}
         map.each_pair do |key, value|
           key = key.to_sym
@@ -38,30 +77,49 @@ module Cequel
         end
       end
 
+      private
+
+      def value_cql
+        map_pairs = @value.each_pair
+          .map { |key, value| "#{quote(key.to_s)} : #{quote(value)}" }
+          .join(', ')
+        "{ #{map_pairs} }"
+      end
+    end
+
+    #
+    # A property comprising key-value pairs of compaction settings
+    #
+    class CompactionProperty < MapProperty
+      private
+
       def normalize_map_property(key, value)
-        case @name
-        when :compaction
-          case key
-          when :class
-            value.sub(/^org\.apache\.cassandra\.db\.compaction\./, '')
-          when :bucket_high, :bucket_low, :tombstone_threshold then value.to_f
-          when :max_threshold, :min_threshold, :min_sstable_size,
-            :sstable_size_in_mb, :tombstone_compaction_interval then value.to_i
-          else value.to_s
-          end
-        when :compression
-          case key
-          when :sstable_compression
-            value.sub(/^org\.apache\.cassandra\.io\.compress\./, '')
-          when :chunk_length_kb then value.to_i
-          when :crc_check_chance then value.to_f
-          else value.to_s
-          end
+        case key
+        when :class
+          value.sub(/^org\.apache\.cassandra\.db\.compaction\./, '')
+        when :bucket_high, :bucket_low, :tombstone_threshold then value.to_f
+        when :max_threshold, :min_threshold, :min_sstable_size,
+          :sstable_size_in_mb, :tombstone_compaction_interval then value.to_i
+        else value.to_s
         end
       end
-
     end
 
-  end
+    #
+    # A property comprising key-value pairs of compression settings
+    #
+    class CompressionProperty < MapProperty
+      private
 
+      def normalize_map_property(key, value)
+        case key
+        when :sstable_compression
+          value.sub(/^org\.apache\.cassandra\.io\.compress\./, '')
+        when :chunk_length_kb then value.to_i
+        when :crc_check_chance then value.to_f
+        else value.to_s
+        end
+      end
+    end
+  end
 end
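
Note: this change replaces the name-based `case` that used to live in `set_normalized_value`/`normalize_map_property` with a small class hierarchy selected by the new `TableProperty.build` factory (`CompactionProperty` and `CompressionProperty` subclass the new `MapProperty`). A minimal sketch of how the factory might behave; the expected values are inferred from the diff above, not taken from the gem's documentation:

    require 'cequel'

    # build picks the subclass from the property name and normalizes map values.
    compaction = Cequel::Schema::TableProperty.build(
      :compaction,
      'class' => 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
      'min_threshold' => '4'
    )
    compaction.class  # => Cequel::Schema::CompactionProperty
    compaction.value  # => roughly { class: "SizeTieredCompactionStrategy", min_threshold: 4 }

    # A scalar property falls through to the base class; #to_cql quotes the value.
    comment = Cequel::Schema::TableProperty.build(:comment, 'user posts')
    comment.to_cql    # => roughly "comment = 'user posts'"

Since `new` is now protected (`class << self; protected :new; end`), `build` is the intended entry point.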
data/lib/cequel/schema/table_reader.rb

@@ -1,9 +1,11 @@
 module Cequel
-
   module Schema
-
+    #
+    # A TableReader will query Cassandra's internal representation of a table's
+    # schema, and build a {Table} instance exposing an object representation of
+    # that schema
+    #
     class TableReader
-
       COMPOSITE_TYPE_PATTERN =
         /^org\.apache\.cassandra\.db\.marshal\.CompositeType\((.+)\)$/
       REVERSED_TYPE_PATTERN =
@@ -11,22 +13,40 @@ module Cequel
       COLLECTION_TYPE_PATTERN =
         /^org\.apache\.cassandra\.db\.marshal\.(List|Set|Map)Type\((.+)\)$/
 
-      STORAGE_PROPERTIES = %w[bloom_filter_fp_chance caching comment compaction
-        compression dclocal_read_repair_chance gc_grace_seconds
-        read_repair_chance replicate_on_write]
-
+      # @return [Table] object representation of the table defined in the
+      #   database
       attr_reader :table
 
+      #
+      # Read the schema defined in the database for a given table and return a
+      # {Table} instance
+      #
+      # @param (see #initialize)
+      # @return (see #read)
+      #
       def self.read(keyspace, table_name)
         new(keyspace, table_name).read
       end
 
+      #
+      # @param keyspace [Metal::Keyspace] keyspace to read the table from
+      # @param table_name [Symbol] name of the table to read
+      # @private
+      #
       def initialize(keyspace, table_name)
         @keyspace, @table_name = keyspace, table_name
         @table = Table.new(table_name.to_sym)
       end
       private_class_method(:new)
 
+      #
+      # Read table schema from the database
+      #
+      # @return [Table] object representation of table in the database, or
+      #   `nil` if no table by given name exists
+      #
+      # @api private
+      #
       def read
         if table_data.present?
           read_partition_keys
@@ -38,10 +58,19 @@ module Cequel
       end
 
       protected
+
       attr_reader :keyspace, :table_name, :table
 
       private
 
+      # XXX This gets a lot easier in Cassandra 2.0: all logical columns
+      # (including keys) are returned from the `schema_columns` query, so
+      # there's no need to jump through all these hoops to figure out what the
+      # key columns look like.
+      #
+      # However, this approach works for both 1.2 and 2.0, so better to keep it
+      # for now. It will be worth refactoring this code to take advantage of
+      # 2.0's better interface in a future version of Cequel that targets 2.0+.
       def read_partition_keys
         validator = table_data['key_validator']
         types = parse_composite_types(validator) || [validator]
@@ -51,6 +80,7 @@ module Cequel
         end
       end
 
+      # XXX See comment on {read_partition_keys}
       def read_clustering_columns
        column_aliases = JSON.parse(table_data['column_aliases'])
        comparators = parse_composite_types(table_data['comparator'])
@@ -100,16 +130,17 @@ module Cequel
       end
 
       def read_collection_column(name, collection_type, *internal_types)
-        types = internal_types.map { |internal| Type.lookup_internal(internal) }
+        types = internal_types
+          .map { |internal| Type.lookup_internal(internal) }
         table.__send__("add_#{collection_type}", name.to_sym, *types)
       end
 
       def read_properties
-        table_data.slice(*STORAGE_PROPERTIES).each do |name, value|
+        table_data.slice(*Table::STORAGE_PROPERTIES).each do |name, value|
          table.add_property(name, value)
        end
-        compaction = JSON.parse(table_data['compaction_strategy_options']).
-          symbolize_keys
+        compaction = JSON.parse(table_data['compaction_strategy_options'])
+          .symbolize_keys
        compaction[:class] = table_data['compaction_strategy_class']
        table.add_property(:compaction, compaction)
        compression = JSON.parse(table_data['compression_parameters'])
@@ -138,12 +169,11 @@ module Cequel
            SELECT * FROM system.schema_columns
            WHERE keyspace_name = ? AND columnfamily_name = ?
          CQL
-          column_query.map(&:to_hash)
+          column_query.map(&:to_hash).select do |column|
+            !column.key?('type') || column['type'] == 'regular'
+          end
         end
       end
-
     end
-
   end
-
 end
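
Beyond the added documentation, two behavioral changes stand out here: the storage property list now lives on `Table` (referenced as `Table::STORAGE_PROPERTIES`), and `column_data` filters the `system.schema_columns` rows to those without a `type` field or with `type = 'regular'`, so the key columns that Cassandra 2.0 reports in that table are not misread as data columns. A hedged usage sketch; the host, keyspace, and table names are invented and a reachable Cassandra node with an existing table is assumed:

    require 'cequel'

    # Cequel.connect returns the low-level keyspace handle TableReader expects.
    keyspace = Cequel.connect(host: '127.0.0.1', keyspace: 'myapp_development')
    table = Cequel::Schema::TableReader.read(keyspace, :posts)

    unless table.nil?                           # read returns nil if the table is absent
      table.partition_key_columns.map(&:name)   # partition key column names
      table.clustering_columns.map(&:name)      # clustering column names
      table.data_columns.map(&:name)            # regular columns only, per the new filter
    end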
data/lib/cequel/schema/table_synchronizer.rb

@@ -1,9 +1,30 @@
 module Cequel
-
   module Schema
-
+    #
+    # Synchronize a table schema in the database with a desired table schema
+    #
+    # @see .apply
+    # @see Keyspace#synchronize_table
+    #
     class TableSynchronizer
-
+      # @return [Table] table as it is currently defined
+      # @api private
+      attr_reader :existing
+      # @return [Table] table schema as it is desired
+      # @api private
+      attr_reader :updated
+      #
+      # Takes an existing table schema read from the database, and a desired
+      # schema for that table. Modifies the table schema in the database to
+      # match the desired schema, or creates the table as specified if it does
+      # not yet exist
+      #
+      # @param keyspace [Metal::Keyspace] keyspace that contains table
+      # @param existing [Table] table schema as it is currently defined
+      # @param updated [Table] table schema as it is desired
+      # @return [void]
+      # @raise (see #apply)
+      #
       def self.apply(keyspace, existing, updated)
         if existing
           TableUpdater.apply(keyspace, existing.name) do |updater|
@@ -14,28 +35,92 @@ module Cequel
           end
         end
       end
 
+      #
+      # @param updater [TableUpdater] table updater to hold schema
+      #   modifications
+      # @param existing [Table] table schema as it is currently defined
+      # @param updated [Table] table schema as it is desired
+      # @return [void]
+      # @private
+      #
       def initialize(updater, existing, updated)
         @updater, @existing, @updated = updater, existing, updated
       end
       private_class_method :new
 
+      #
+      # Apply the changes needed to synchronize the schema in the database with
+      # the desired schema
+      #
+      # @return [void]
+      # @raise (see MigrationValidator#validate!)
+      #
+      # @api private
+      #
       def apply
+        validate!
         update_keys
         update_columns
         update_properties
       end
 
+      #
+      # Iterate over pairs of (old_key, new_key)
+      #
+      # @yieldparam old_key [Column] key in existing schema
+      # @yieldparam new_key [Column] corresponding key in updated schema
+      # @return [void]
+      #
+      # @api private
+      #
+      def each_key_pair(&block)
+        existing.key_columns.zip(updated.key_columns, &block)
+      end
+
+      #
+      # Iterate over pairs of (old_column, new_column)
+      #
+      # @yieldparam old_column [Column] column in existing schema
+      # @yieldparam new_column [Column] corresponding column in updated schema
+      # @return [void]
+      #
+      # @api private
+      #
+      def each_data_column_pair(&block)
+        if existing.compact_storage? && existing.clustering_columns.any?
+          yield existing.data_columns.first, updated.data_columns.first
+        else
+          old_columns = existing.data_columns.index_by { |col| col.name }
+          new_columns = updated.data_columns.index_by { |col| col.name }
+          all_column_names = (old_columns.keys + new_columns.keys).tap(&:uniq!)
+          all_column_names.each do |name|
+            yield old_columns[name], new_columns[name]
+          end
+        end
+      end
+
+      #
+      # Iterate over pairs of (old_clustering_column, new_clustering_column)
+      #
+      # @yieldparam old_clustering_column [Column] key in existing schema
+      # @yieldparam new_clustering_column [Column] corresponding key in updated
+      #   schema
+      # @return [void]
+      #
+      # @api private
+      #
+      def each_clustering_column_pair(&block)
+        existing.clustering_columns.zip(updated.clustering_columns, &block)
+      end
+
       protected
-      attr_reader :updater, :existing, :updated
+
+      attr_reader :updater
 
       private
 
       def update_keys
         each_key_pair do |old_key, new_key|
-          if old_key.type != new_key.type
-            raise InvalidSchemaMigration,
-              "Can't change type of key column #{old_key.name} from #{old_key.type} to #{new_key.type}"
-          end
           if old_key.name != new_key.name
             updater.rename_column(old_key.name || :column1, new_key.name)
           end
@@ -43,24 +128,19 @@ module Cequel
         end
       end
 
       def update_columns
-        each_column_pair do |old_column, new_column|
+        each_data_column_pair do |old_column, new_column|
           if old_column.nil?
             add_column(new_column)
           elsif new_column
-            if old_column.class != new_column.class
-              raise InvalidSchemaMigration,
-                "Can't change #{old_column.name} from #{old_column.class.name.demodulize} to #{new_column.class.name.demodulize}"
-            end
             update_column(old_column, new_column)
+            update_index(old_column, new_column)
           end
         end
       end
 
       def add_column(column)
         updater.add_data_column(column)
-        if column.indexed?
-          updater.create_index(column.name, column.index_name)
-        end
+        updater.create_index(column.name, column.index_name) if column.indexed?
       end
 
       def update_column(old_column, new_column)
@@ -70,6 +150,9 @@ module Cequel
         if old_column.type != new_column.type
           updater.change_column(new_column.name, new_column.type)
         end
+      end
+
+      def update_index(old_column, new_column)
         if !old_column.indexed? && new_column.indexed?
           updater.create_index(new_column.name, new_column.index_name)
         elsif old_column.indexed? && !new_column.indexed?
@@ -88,34 +171,9 @@ module Cequel
         updater.change_properties(changes) if changes.any?
       end
 
-      def each_key_pair(&block)
-        if existing.partition_key_columns.length != updated.partition_key_columns.length
-          raise InvalidSchemaMigration,
-            "Existing partition keys #{existing.partition_key_columns.map { |key| key.name }.join(',')} differ from specified partition keys #{updated.partition_key_columns.map { |key| key.name }.join(',')}"
-        end
-        if existing.clustering_columns.length != updated.clustering_columns.length
-          raise InvalidSchemaMigration,
-            "Existing clustering keys #{existing.clustering_columns.map { |key| key.name }.join(',')} differ from specified clustering keys #{updated.clustering_columns.map { |key| key.name }.join(',')}"
-        end
-        existing.partition_key_columns.zip(updated.partition_key_columns, &block)
-        existing.clustering_columns.zip(updated.clustering_columns, &block)
-      end
-
-      def each_column_pair(&block)
-        if existing.compact_storage? && existing.clustering_columns.any?
-          yield existing.data_columns.first, updated.data_columns.first
-        else
-          old_columns = existing.data_columns.index_by { |col| col.name }
-          new_columns = updated.data_columns.index_by { |col| col.name }
-          all_column_names = (old_columns.keys + new_columns.keys).tap(&:uniq!)
-          all_column_names.each do |name|
-            yield old_columns[name], new_columns[name]
-          end
-        end
+      def validate!
+        MigrationValidator.validate!(self)
       end
-
     end
-
   end
-
 end
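
The pair-iteration helpers (`each_key_pair`, `each_data_column_pair`, `each_clustering_column_pair`) are now exposed so that the new `MigrationValidator` (added in this release, file 49 above) can inspect the existing and desired schemas, and `apply` calls `validate!` before issuing any DDL, replacing the inline `InvalidSchemaMigration` checks that previously lived in this class. At the record level this machinery is typically driven through `synchronize_schema`; a hedged sketch follows, in which the model, keyspace, and host are invented and the connection setup follows the README convention:

    require 'cequel'

    Cequel::Record.connection =
      Cequel.connect(host: '127.0.0.1', keyspace: 'blog_development')

    class Post
      include Cequel::Record
      key :blog_subdomain, :text
      key :permalink, :text
      column :title, :text
      column :body, :text
    end

    # Reads the current schema with TableReader, builds the desired schema from
    # the class definition, and hands both to TableSynchronizer.apply. As of
    # this version, MigrationValidator.validate! runs first, so an incompatible
    # change (such as altering a key column's type) raises
    # InvalidSchemaMigration before any ALTER TABLE is executed.
    Post.synchronize_schema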