cassandra 0.13.0 → 0.14.0

@@ -0,0 +1,40 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # for production, you should probably set pattern to %c instead of %l.
+ # (%l is slower.)
+
+ # output messages into a rolling log file as well as stdout
+ log4j.rootLogger=INFO,stdout,R
+
+ # stdout
+ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+ log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+
+ # rolling log file
+ log4j.appender.R=org.apache.log4j.RollingFileAppender
+ log4j.appender.R.maxFileSize=20MB
+ log4j.appender.R.maxBackupIndex=50
+ log4j.appender.R.layout=org.apache.log4j.PatternLayout
+ log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
+ # Edit the next line to point to your logs directory
+ log4j.appender.R.File=data/logs/system.log
+
+ # Application logging options
+ #log4j.logger.org.apache.cassandra=DEBUG
+ #log4j.logger.org.apache.cassandra.db=DEBUG
+ #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
@@ -0,0 +1,72 @@
+ {"Twitter":{
+ "Users":{
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Standard"},
+ "UserAudits":{
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Standard"},
+ "UserCounters":{
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Standard",
+ "default_validation_class":"CounterColumnType"},
+ "UserCounterAggregates":{
+ "subcomparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Super",
+ "default_validation_class":"CounterColumnType"},
+ "UserRelationships":{
+ "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Super"},
+ "Usernames":{
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Standard"},
+ "Statuses":{
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Standard"},
+ "StatusAudits":{
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Standard"},
+ "StatusRelationships":{
+ "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Super"},
+ "Indexes":{
+ "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+ "column_type":"Super"},
+ "TimelinishThings":{
+ "comparator_type":"org.apache.cassandra.db.marshal.BytesType",
+ "column_type":"Standard"}
+ },
+ "Multiblog":{
+ "Blogs":{
+ "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+ "column_type":"Standard"},
+ "Comments":{
+ "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+ "column_type":"Standard"}
+ },
+ "MultiblogLong":{
+ "Blogs":{
+ "comparator_type":"org.apache.cassandra.db.marshal.LongType",
+ "column_type":"Standard"},
+ "Comments":{
+ "comparator_type":"org.apache.cassandra.db.marshal.LongType",
+ "column_type":"Standard"}
+ },
+ "TypeConversions":{
+ "UUIDColumnConversion":{
+ "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+ "column_type":"Standard"},
+ "SuperUUID":{
+ "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+ "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+ "column_type":"Super"},
+ "CompositeColumnConversion":{
+ "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
+ "column_type":"Standard"},
+ "DynamicComposite":{
+ "comparator_type":"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType,i=>org.apache.cassandra.db.marshal.IntegerType)",
+ "column_type":"Standard"}
+ }
+ }
@@ -0,0 +1,57 @@
+ create keyspace Twitter with
+ placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+ strategy_options = {replication_factor:1};
+ use Twitter;
+ create column family Users with comparator = 'UTF8Type';
+ create column family UserAudits with comparator = 'UTF8Type';
+ create column family UserCounters with comparator = 'UTF8Type' and
+ default_validation_class = CounterColumnType;
+ create column family UserCounterAggregates with column_type = 'Super'
+ and comparator = 'UTF8Type' and
+ subcomparator = 'UTF8Type' and
+ default_validation_class = CounterColumnType;
+ create column family UserRelationships with
+ comparator = 'UTF8Type' and
+ column_type = 'Super' and
+ subcomparator = 'TimeUUIDType';
+ create column family Usernames with comparator = 'UTF8Type';
+ create column family Statuses
+ with comparator = 'UTF8Type'
+ and column_metadata = [
+ {column_name: 'tags', validation_class: 'BytesType', index_type: 'KEYS'}
+ ];
+ create column family StatusAudits with comparator = 'UTF8Type';
+ create column family StatusRelationships with
+ comparator = 'UTF8Type' and
+ column_type = 'Super' and
+ subcomparator = 'TimeUUIDType';
+ create column family Indexes with
+ comparator = 'UTF8Type' and
+ column_type = 'Super';
+ create column family TimelinishThings with
+ comparator = 'BytesType';
+
+ create keyspace Multiblog with
+ placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+ strategy_options = {replication_factor:1};
+ use Multiblog;
+ create column family Blogs with comparator = 'TimeUUIDType';
+ create column family Comments with comparator = 'TimeUUIDType';
+
+
+ create keyspace MultiblogLong with
+ placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+ strategy_options = {replication_factor:1};
+ use MultiblogLong;
+ create column family Blogs with comparator = 'LongType';
+ create column family Comments with comparator = 'LongType';
+
+ create keyspace TypeConversions with
+ placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+ strategy_options = {replication_factor:1};
+ use TypeConversions;
+ create column family UUIDColumnConversion with comparator = TimeUUIDType;
+ create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
+ create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
+ create column family DynamicComposite with comparator ='DynamicCompositeType
+ (a=>AsciiType,b=>BytesType,i=>IntegerType,x=>LexicalUUIDType,l=>LongType,t=>TimeUUIDType,s=>UTF8Type,u=>UUIDType)';
@@ -0,0 +1,34 @@
+ #include <ruby.h>
+ #include <arpa/inet.h>
+
+ VALUE parts_ivar_id;
+
+ VALUE rb_cassandra_composite_fast_unpack(VALUE self, VALUE packed_string_value) {
+   int i = 0;
+   int index = 0;
+   int message_length = RSTRING_LEN(packed_string_value);
+   char *packed_string = (char *)RSTRING_PTR(packed_string_value);
+
+   VALUE parts = rb_ary_new();
+   while (index < message_length) {
+     uint16_t length = ntohs(((uint16_t *)(packed_string+index))[0]);
+     VALUE part = rb_str_new("", length);
+     for (i = 0; i < length; i++) {
+       ((char *)RSTRING_PTR(part))[i] = packed_string[index+2+i];
+     }
+     rb_ary_push(parts, part);
+     index += length + 3;
+   }
+
+   rb_ivar_set(self, parts_ivar_id, parts);
+
+   return Qnil;
+ }
+
+ void Init_cassandra_native(void) {
+   VALUE cassandra_module = rb_const_get(rb_cObject, rb_intern("Cassandra"));
+   VALUE cassandra_composite_class = rb_define_class_under(cassandra_module, "Composite", rb_cObject);
+   rb_define_method(cassandra_composite_class, "fast_unpack", rb_cassandra_composite_fast_unpack, 1);
+
+   parts_ivar_id = rb_intern("@parts");
+ }
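The C extension's fast_unpack walks a packed Composite column name: each component is stored as a two-byte big-endian length prefix, the component bytes, and a one-byte end-of-component marker, which is why the cursor advances by length + 3. A pure-Ruby sketch of the same loop, useful for following what the C does (the method name and standalone form are illustrative, not part of the gem):

    def slow_unpack(packed)
      parts = []
      index = 0
      while index < packed.bytesize
        length = packed[index, 2].unpack('n').first  # two-byte big-endian length prefix
        parts << packed[index + 2, length]           # the component bytes themselves
        index += length + 3                          # prefix (2) + component + end-of-component byte (1)
      end
      parts
    end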
@@ -0,0 +1,9 @@
+ if defined?(RUBY_ENGINE) && RUBY_ENGINE =~ /jruby/
+   File.open('Makefile', 'w'){|f| f.puts "all:\n\ninstall:\n" }
+ else
+   require 'mkmf'
+
+   $CFLAGS = "-g -O2 -Wall -Werror"
+
+   create_makefile 'cassandra_native'
+ end
@@ -24,6 +24,7 @@ require 'cassandra/time'
  require 'cassandra/comparable'
  require 'cassandra/long'
  require 'cassandra/composite'
+ require 'cassandra/dynamic_composite'
  require 'cassandra/ordered_hash'
  require 'cassandra/columns'
  require 'cassandra/protocol'
@@ -12,7 +12,7 @@ class Cassandra
  client.remove(@keyspace, key, column_path, timestamp, consistency_level)
  end

- def _count_columns(column_family, key, super_column, consistency)
+ def _count_columns(column_family, key, super_column, start, stop, count, consistency)
  client.get_count(@keyspace, key,
  CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => super_column),
  consistency
@@ -55,11 +55,11 @@ class Cassandra

  # Slices
  else
- predicate = CassandraThrift::SlicePredicate.new(:slice_range =>
+ predicate = CassandraThrift::SlicePredicate.new(:slice_range =>
  CassandraThrift::SliceRange.new(
- :reversed => reversed,
- :count => count,
- :start => start,
+ :reversed => reversed,
+ :count => count,
+ :start => start,
  :finish => finish))

  if is_super(column_family) and column
@@ -72,16 +72,17 @@ class Cassandra
  end
  end

- def _get_range(column_family, start_key, finish_key, key_count, columns, start, finish, count, consistency)
+ def _get_range(column_family, start_key, finish_key, key_count, columns, start, finish, count, consistency, reversed=false)
  column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
  predicate = if columns
  CassandraThrift::SlicePredicate.new(:column_names => columns)
  else
- CassandraThrift::SlicePredicate.new(:slice_range =>
+ CassandraThrift::SlicePredicate.new(:slice_range =>
  CassandraThrift::SliceRange.new(
- :start => start,
+ :start => start,
  :finish => finish,
- :count => count))
+ :count => count,
+ :reversed => reversed))
  end
  range = CassandraThrift::KeyRange.new(:start_key => start_key, :end_key => finish_key, :count => key_count)
  client.get_range_slices(@keyspace, column_parent, predicate, range, consistency)
@@ -3,7 +3,7 @@
  Create a new Cassandra client instance. Accepts a keyspace name, and optional host and port.

  client = Cassandra.new('twitter', '127.0.0.1:9160')
-
+
  If the server requires authentication, you must authenticate before making calls

  client.login!('username','password')
@@ -64,6 +64,8 @@ class Cassandra
  :thrift_client_class => ThriftClient
  }

+ THRIFT_DEFAULTS[:protocol] = Thrift::BinaryProtocolAccelerated if Thrift.const_defined?(:BinaryProtocolAccelerated)
+
  attr_reader :keyspace, :servers, :schema, :thrift_client_options, :thrift_client_class, :auth_request

  def self.DEFAULT_TRANSPORT_WRAPPER
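With this change the accelerated binary protocol is picked up automatically whenever the thrift gem's native extension defines it. Because THRIFT_DEFAULTS is merged with the caller's thrift_client_options in the constructor, the protocol can presumably still be overridden per client; a hedged sketch:

    # Assumption: a :protocol entry in the options hash wins over the default,
    # since THRIFT_DEFAULTS.merge(thrift_client_options) is applied at construction.
    client = Cassandra.new('Twitter', '127.0.0.1:9160',
                           :protocol => Thrift::BinaryProtocol)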
@@ -75,6 +77,8 @@ class Cassandra
  @is_super = {}
  @column_name_class = {}
  @sub_column_name_class = {}
+ @column_name_maker = {}
+ @sub_column_name_maker = {}
  @auto_discover_nodes = true
  thrift_client_options[:transport_wrapper] ||= Cassandra.DEFAULT_TRANSPORT_WRAPPER
  @thrift_client_options = THRIFT_DEFAULTS.merge(thrift_client_options)
@@ -462,7 +466,7 @@ class Cassandra

  ##
  # This method is used to delete (actually marking them as deleted with a
- # tombstone) rows, columns, or super columns depending on the parameters
+ # tombstone) rows, columns, or super columns depending on the parameters
  # passed. If only a key is passed the entire row will be marked as deleted.
  # If a column name is passed in that column will be deleted.
  #
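A usage sketch of the behaviour this comment describes, assuming it documents the public remove method (the row key and column name are invented):

    client.remove(:Statuses, '12345')          # tombstone the entire row
    client.remove(:Statuses, '12345', 'body')  # tombstone a single column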
@@ -483,14 +487,14 @@ class Cassandra
  column_family, column, sub_column, options = extract_and_validate_params(column_family, key, columns_and_options, WRITE_DEFAULTS)

  if @batch
- mutation_map =
+ mutation_map =
  {
  key => {
  column_family => [ _delete_mutation(column_family, column, sub_column, options[:timestamp]|| Time.stamp) ]
  }
  }
  @batch << [mutation_map, options[:consistency]]
- else
+ else
  # Let's continue using the 'remove' thrift method...not sure about the implications/performance of using the mutate instead
  # Otherwise we could just use the mutation_map above, and do _mutate(mutation_map, options[:consistency])
  args = {:column_family => column_family}
@@ -514,8 +518,8 @@ class Cassandra
  # * :consistency - Uses the default read consistency if none specified.
  #
  def count_columns(column_family, key, *columns_and_options)
- column_family, super_column, _, options =
- extract_and_validate_params(column_family, key, columns_and_options, READ_DEFAULTS)
+ column_family, super_column, _, options =
+ extract_and_validate_params(column_family, key, columns_and_options, READ_DEFAULTS)
  _count_columns(column_family, key, super_column, options[:start], options[:stop], options[:count], options[:consistency])
  end

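With the widened _count_columns signature, count_columns now forwards :start, :stop, and :count from its options hash. A sketch (row key and bounds are invented):

    client.count_columns(:UserAudits, 'bob')                    # count every column in the row
    client.count_columns(:UserAudits, 'bob',
                         :start => 'a', :stop => 'm', :count => 100)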
@@ -538,7 +542,7 @@ class Cassandra
  end

  ##
- # Return a hash of column value pairs for the path you request.
+ # Return a hash of column value pairs for the path you request.
  #
  # * column_family - The column_family that you are querying.
  # * key - The row key to query.
@@ -548,8 +552,8 @@ class Cassandra
  # * :consistency - Uses the default read consistency if none specified.
  #
  def get_columns(column_family, key, *columns_and_options)
- column_family, columns, sub_columns, options =
- extract_and_validate_params(column_family, key, columns_and_options, READ_DEFAULTS)
+ column_family, columns, sub_columns, options =
+ extract_and_validate_params(column_family, key, columns_and_options, READ_DEFAULTS)
  _get_columns(column_family, key, columns, sub_columns, options[:consistency])
  end

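get_columns fetches an explicit list of column names rather than a slice. A sketch against the Users family from the bundled schema (key and column names invented):

    client.get_columns(:Users, '5', ['screen_name', 'location'])
    # => values for the requested columns, in the order they were asked for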
@@ -574,7 +578,7 @@ class Cassandra
  ##
  # Return a hash (actually, a Cassandra::OrderedHash) or a single value
  # representing the element at the column_family:key:[column]:[sub_column]
- # path you request.
+ # path you request.
  #
  # * column_family - The column_family that you are querying.
  # * key - The row key to query.
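A sketch of the path-style access described above, using families from the bundled schema (keys and column names invented):

    client.get(:Statuses, '12345')                                # whole row as a Cassandra::OrderedHash
    client.get(:Statuses, '12345', 'body')                        # a single column value
    client.get(:StatusRelationships, '12345', 'user_timelines')   # sub-columns of a super column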
@@ -613,7 +617,7 @@ class Cassandra
  # * :consistency - Uses the default read consistency if none specified.
  #
  def multi_get(column_family, keys, *columns_and_options)
- column_family, column, sub_column, options =
+ column_family, column, sub_column, options =
  extract_and_validate_params(column_family, keys, columns_and_options, READ_DEFAULTS)

  hash = _multiget(column_family, keys, column, sub_column, options[:count], options[:start], options[:finish], options[:reversed], options[:consistency])
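multi_get fans the same lookup out over several row keys and returns a hash keyed by row key. Sketch:

    client.multi_get(:Users, ['1', '2', '3'])
    # => an OrderedHash of { row_key => row_hash } for the requested keys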
@@ -642,7 +646,7 @@ class Cassandra
  # * :consistency - Uses the default read consistency if none specified.
  #
  def exists?(column_family, key, *columns_and_options)
- column_family, column, sub_column, options =
+ column_family, column, sub_column, options =
  extract_and_validate_params(column_family, key, columns_and_options, READ_DEFAULTS)
  result = if column
  _multiget(column_family, [key], column, sub_column, 1, '', '', false, options[:consistency])[key]
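As the hunk shows, exists? is just a _multiget with a count of 1. Sketch:

    client.exists?(:Users, '5')           # is the row present?
    client.exists?(:Users, '5', 'email')  # is this particular column present?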
@@ -663,7 +667,7 @@ class Cassandra
  # Cassandra#get_range_single will be used.
  #
  # The start_key and finish_key parameters are only useful for iterating over all records
- # as is done in the Cassandra#each and Cassandra#each_key methods if you are using the
+ # as is done in the Cassandra#each and Cassandra#each_key methods if you are using the
  # RandomPartitioner.
  #
  # If the table is partitioned with OrderPreservingPartitioner you may
@@ -674,11 +678,11 @@ class Cassandra
  # each record returned.
  #
  # Please note that Cassandra returns a row for each row that has existed in the
- # system since gc_grace_seconds. This is because deleted row keys are marked as
+ # system since gc_grace_seconds. This is because deleted row keys are marked as
  # deleted, but left in the system until the cluster has had reasonable time to replicate the deletion.
  # This function attempts to suppress deleted rows (actually any row returned without
  # columns is suppressed).
- #
+ #
  # Please note that when enabling the :reversed option, :start and :finish should be swapped (i.e.
  # reversal happens before selecting the range).
  #
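When :reversed is enabled the slice is taken after reversal, so the :start and :finish bounds trade places. A sketch using the Multiblog keyspace from this release (key count and bounds invented):

    # newest-first over TimeUUID-ordered columns
    client.get_range(:Blogs, :key_count => 20, :count => 10, :reversed => true)
    # with explicit column bounds, pass the later column as :start once :reversed is set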
@@ -712,8 +716,8 @@ class Cassandra
  def get_range_single(column_family, options = {})
  return_empty_rows = options.delete(:return_empty_rows) || false

- column_family, _, _, options =
- extract_and_validate_params(column_family, "", [options],
+ column_family, _, _, options =
+ extract_and_validate_params(column_family, "", [options],
  READ_DEFAULTS.merge(:start_key => '',
  :finish_key => '',
  :key_count => 100,
@@ -847,10 +851,10 @@ class Cassandra
  @batch = []
  yield(self)
  compacted_map,seen_clevels = compact_mutations!
- clevel = if options[:consistency] != nil # Override any clevel from individual mutations if
+ clevel = if options[:consistency] != nil # Override any clevel from individual mutations if
  options[:consistency]
  elsif seen_clevels.length > 1 # Cannot choose which CLevel to use if there are several ones
- raise "Multiple consistency levels used in the batch, and no override...cannot pick one"
+ raise "Multiple consistency levels used in the batch, and no override...cannot pick one"
  else # if no consistency override has been provided but all the clevels in the batch are the same: use that one
  seen_clevels.first
  end
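The batch block queues mutations and then needs a single consistency level: an explicit :consistency option wins, otherwise every queued mutation must agree or the raise above fires. A sketch, assuming the gem's Cassandra::Consistency constants:

    client.batch(:consistency => Cassandra::Consistency::QUORUM) do |c|
      c.insert(:Users, 'new_key', {'screen_name' => 'new_user'})
      c.remove(:Users, 'stale_key')
    end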
@@ -950,7 +954,7 @@ class Cassandra

  ##
  # This method is used to query a secondary index with a set of
- # provided search parameters
+ # provided search parameters.
  #
  # Please note that you can either specify a
  # CassandraThrift::IndexClause or an array of hashes with the
@@ -963,7 +967,7 @@ class Cassandra
  # * :comparison - Type of comparison to do.
  # * options
  # * :key_count - Set maximum number of rows to return. (Only works if CassandraThrift::IndexClause is not passed in.)
- # * :key_start - Set starting row key for search. (Only works if CassandraThrift::IndexClause is not passed in.)
+ # * :start_key - Set starting row key for search. (Only works if CassandraThrift::IndexClause is not passed in.)
  # * :consistency
  #
  # TODO: Supercolumn support.
@@ -971,14 +975,17 @@ class Cassandra
  return false if Cassandra.VERSION.to_f < 0.7

  column_family, columns, _, options =
- extract_and_validate_params(column_family, [], columns_and_options, READ_DEFAULTS.merge(:key_count => 100, :key_start => ""))
+ extract_and_validate_params(column_family, [], columns_and_options,
+ READ_DEFAULTS.merge(:key_count => 100, :start_key => nil, :key_start => nil))
+
+ start_key = options[:start_key] || options[:key_start] || ""

  if index_clause.class != CassandraThrift::IndexClause
  index_expressions = index_clause.collect do |expression|
  create_index_expression(expression[:column_name], expression[:value], expression[:comparison])
  end

- index_clause = create_index_clause(index_expressions, options[:key_start], options[:key_count])
+ index_clause = create_index_clause(index_expressions, start_key, options[:key_count])
  end

  key_slices = _get_indexed_slices(column_family, index_clause, columns, options[:count], options[:start],
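Putting the pieces together: the hash form of an index clause plus the renamed :start_key option (:key_start is still honoured as a fallback) looks roughly like this, querying the KEYS index on the tags column that the bundled Statuses schema defines (the value searched for is invented):

    client.get_indexed_slices(:Statuses, [
      { :column_name => 'tags', :value => 'cassandra', :comparison => '==' }
    ], :key_count => 50, :start_key => '')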
@@ -1005,13 +1012,13 @@ class Cassandra
  @batch.each do |mutation_op|
  # A single mutation op looks like:
  # For an insert/update
- #[ { key1 =>
+ #[ { key1 =>
  # { CF1 => [several of CassThrift:Mutation(colname,value,TS,ttl)]
  # CF2 => [several mutations]
  # },
  # key2 => {...} # Not sure if they can come batched like this...so there might only be a single key (and CF)
  # }, # [0]
- # consistency # [1]
+ # consistency # [1]
  #]
  mmap = mutation_op[0] # :remove OR a hash like {"key"=> {"CF"=>[mutationclass1,...] } }
  used_clevels[mutation_op[1]] = true #save the clevel required for this operation