cassandra 0.15.0 → 0.16.0

@@ -0,0 +1,44 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # for production, you should probably set pattern to %c instead of %l.
+ # (%l is slower.)
+
+ # output messages into a rolling log file as well as stdout
+ log4j.rootLogger=INFO,stdout,R
+
+ # stdout
+ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+ log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+
+ # rolling log file
+ log4j.appender.R=org.apache.log4j.RollingFileAppender
+ log4j.appender.R.maxFileSize=20MB
+ log4j.appender.R.maxBackupIndex=50
+ log4j.appender.R.layout=org.apache.log4j.PatternLayout
+ log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
+ # Edit the next line to point to your logs directory
+ log4j.appender.R.File=/var/log/cassandra/system.log
+
+ # Application logging options
+ #log4j.logger.org.apache.cassandra=DEBUG
+ #log4j.logger.org.apache.cassandra.db=DEBUG
+ #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
+
+ # Adding this to avoid thrift logging disconnect errors.
+ log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
+
@@ -0,0 +1,72 @@
+ {"Twitter":{
+   "Users":{
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Standard"},
+   "UserAudits":{
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Standard"},
+   "UserCounters":{
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Standard",
+     "default_validation_class":"CounterColumnType"},
+   "UserCounterAggregates":{
+     "subcomparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Super",
+     "default_validation_class":"CounterColumnType"},
+   "UserRelationships":{
+     "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Super"},
+   "Usernames":{
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Standard"},
+   "Statuses":{
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Standard"},
+   "StatusAudits":{
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Standard"},
+   "StatusRelationships":{
+     "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Super"},
+   "Indexes":{
+     "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
+     "column_type":"Super"},
+   "TimelinishThings":{
+     "comparator_type":"org.apache.cassandra.db.marshal.BytesType",
+     "column_type":"Standard"}
+   },
+ "Multiblog":{
+   "Blogs":{
+     "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+     "column_type":"Standard"},
+   "Comments":{
+     "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+     "column_type":"Standard"}
+   },
+ "MultiblogLong":{
+   "Blogs":{
+     "comparator_type":"org.apache.cassandra.db.marshal.LongType",
+     "column_type":"Standard"},
+   "Comments":{
+     "comparator_type":"org.apache.cassandra.db.marshal.LongType",
+     "column_type":"Standard"}
+   },
+ "TypeConversions":{
+   "UUIDColumnConversion":{
+     "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+     "column_type":"Standard"},
+   "SuperUUID":{
+     "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+     "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
+     "column_type":"Super"},
+   "CompositeColumnConversion":{
+     "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
+     "column_type":"Standard"},
+   "DynamicComposite":{
+     "comparator_type":"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType,i=>org.apache.cassandra.db.marshal.IntegerType)",
+     "column_type":"Standard"}
+   }
+ }
@@ -0,0 +1,57 @@
+ create keyspace Twitter with
+   placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+   strategy_options = {replication_factor:1};
+ use Twitter;
+ create column family Users with comparator = 'UTF8Type';
+ create column family UserAudits with comparator = 'UTF8Type';
+ create column family UserCounters with comparator = 'UTF8Type' and
+   default_validation_class = CounterColumnType;
+ create column family UserCounterAggregates with column_type = 'Super'
+   and comparator = 'UTF8Type' and
+   subcomparator = 'UTF8Type' and
+   default_validation_class = CounterColumnType;
+ create column family UserRelationships with
+   comparator = 'UTF8Type' and
+   column_type = 'Super' and
+   subcomparator = 'TimeUUIDType';
+ create column family Usernames with comparator = 'UTF8Type';
+ create column family Statuses
+   with comparator = 'UTF8Type'
+   and column_metadata = [
+     {column_name: 'tags', validation_class: 'BytesType', index_type: 'KEYS'}
+   ];
+ create column family StatusAudits with comparator = 'UTF8Type';
+ create column family StatusRelationships with
+   comparator = 'UTF8Type' and
+   column_type = 'Super' and
+   subcomparator = 'TimeUUIDType';
+ create column family Indexes with
+   comparator = 'UTF8Type' and
+   column_type = 'Super';
+ create column family TimelinishThings with
+   comparator = 'BytesType';
+
+ create keyspace Multiblog with
+   placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+   strategy_options = {replication_factor:1};
+ use Multiblog;
+ create column family Blogs with comparator = 'TimeUUIDType';
+ create column family Comments with comparator = 'TimeUUIDType';
+
+
+ create keyspace MultiblogLong with
+   placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+   strategy_options = {replication_factor:1};
+ use MultiblogLong;
+ create column family Blogs with comparator = 'LongType';
+ create column family Comments with comparator = 'LongType';
+
+ create keyspace TypeConversions with
+   placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
+   strategy_options = {replication_factor:1};
+ use TypeConversions;
+ create column family UUIDColumnConversion with comparator = TimeUUIDType;
+ create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
+ create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
+ create column family DynamicComposite with comparator ='DynamicCompositeType
+ (a=>AsciiType,b=>BytesType,i=>IntegerType,x=>LexicalUUIDType,l=>LongType,t=>TimeUUIDType,s=>UTF8Type,u=>UUIDType)';
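
As a point of reference (not part of the gem's diff), here is a minimal Ruby sketch of talking to the Twitter keyspace once a schema like the one above has been loaded. The host, keys and values are illustrative and mirror the test setup further down; the require path assumes the 1.1 entry point added later in this release.

require 'cassandra/1.1'

twitter = Cassandra.new('Twitter', '127.0.0.1:9160')
twitter.insert(:Users, 'bob', {'screen_name' => 'bob', 'location' => 'earth'})
twitter.get(:Users, 'bob')                            # => {'location' => 'earth', 'screen_name' => 'bob'}
twitter.add(:UserCounters, 'bob', 5, 'tweet_count')   # CounterColumnType column family
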
File without changes
data/ext/extconf.rb CHANGED
File without changes
data/lib/cassandra.rb CHANGED
@@ -28,6 +28,7 @@ require 'cassandra/dynamic_composite'
  require 'cassandra/ordered_hash'
  require 'cassandra/columns'
  require 'cassandra/protocol'
+ require 'cassandra/batch'
  require "cassandra/#{Cassandra.VERSION}/columns"
  require "cassandra/#{Cassandra.VERSION}/protocol"
  require "cassandra/cassandra"
@@ -1,25 +1 @@
- class Cassandra
-
-   ## Counters
-
-   # Add a value to the counter in cf:key:super column:column
-   def add(column_family, key, value, *columns_and_options)
-     column_family, column, sub_column, options = extract_and_validate_params(column_family, key, columns_and_options, WRITE_DEFAULTS)
-
-     mutation_map = if is_super(column_family)
-       {
-         key => {
-           column_family => [_super_counter_mutation(column_family, column, sub_column, value)]
-         }
-       }
-     else
-       {
-         key => {
-           column_family => [_standard_counter_mutation(column_family, column, value)]
-         }
-       }
-     end
-
-     @batch ? @batch << [mutation_map, options[:consistency]] : _mutate(mutation_map, options[:consistency])
-   end
- end
+ require "#{File.expand_path(File.dirname(__FILE__))}/../0.8/cassandra"
@@ -1,28 +1 @@
- class Cassandra
-   module Columns #:nodoc:
-     def _standard_counter_mutation(column_family, column_name, value)
-       CassandraThrift::Mutation.new(
-         :column_or_supercolumn => CassandraThrift::ColumnOrSuperColumn.new(
-           :counter_column => CassandraThrift::CounterColumn.new(
-             :name => column_name_class(column_family).new(column_name).to_s,
-             :value => value
-           )
-         )
-       )
-     end
-
-     def _super_counter_mutation(column_family, super_column_name, sub_column, value)
-       CassandraThrift::Mutation.new(:column_or_supercolumn =>
-         CassandraThrift::ColumnOrSuperColumn.new(
-           :counter_super_column => CassandraThrift::SuperColumn.new(
-             :name => column_name_class(column_family).new(super_column_name).to_s,
-             :columns => [CassandraThrift::CounterColumn.new(
-               :name => sub_column_name_class(column_family).new(sub_column).to_s,
-               :value => value
-             )]
-           )
-         )
-       )
-     end
-   end
- end
+ require "#{File.expand_path(File.dirname(__FILE__))}/../0.8/columns"
@@ -1,12 +1 @@
- require "#{File.expand_path(File.dirname(__FILE__))}/../0.7/protocol"
-
- class Cassandra
-   # Inner methods for actually doing the Thrift calls
-   module Protocol #:nodoc:
-     private
-
-     def _remove_counter(key, column_path, consistency_level)
-       client.remove_counter(key, column_path, consistency_level)
-     end
-   end
- end
+ require "#{File.expand_path(File.dirname(__FILE__))}/../0.8/protocol"
@@ -0,0 +1,7 @@
+ class Cassandra
+   def self.VERSION
+     "1.1"
+   end
+ end
+
+ require "#{File.expand_path(File.dirname(__FILE__))}/../cassandra"
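
This hunk looks like the new entry point for the 1.1 interface (the diff view drops the filename; by the gem's layout it would presumably be lib/cassandra/1.1.rb, but that path is an assumption). A sketch of how it would be used:

# Loading the shim defines Cassandra.VERSION before the main client loads,
# which drives the interpolated requires shown in data/lib/cassandra.rb above.
require 'cassandra/1.1'

Cassandra.VERSION   # => "1.1"
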
@@ -0,0 +1 @@
+ require "#{File.expand_path(File.dirname(__FILE__))}/../1.0/cassandra"
@@ -0,0 +1 @@
+ require "#{File.expand_path(File.dirname(__FILE__))}/../1.0/columns"
@@ -0,0 +1 @@
+ require "#{File.expand_path(File.dirname(__FILE__))}/../1.0/protocol"
@@ -0,0 +1,41 @@
+ class Cassandra
+   class Batch
+     include Enumerable
+
+     def initialize(cassandra, options)
+       @queue_size = options.delete(:queue_size) || 0
+       @cassandra = cassandra
+       @options = options
+       @batch_queue = []
+     end
+
+     ##
+     # Append mutation to the batch queue
+     # Flush the batch queue if full
+     #
+     def <<(mutation)
+       @batch_queue << mutation
+       if @queue_size > 0 and @batch_queue.length >= @queue_size
+         begin
+           @cassandra.flush_batch(@options)
+         ensure
+           @batch_queue = []
+         end
+       end
+     end
+
+     ##
+     # Implement each method (required by Enumerable)
+     #
+     def each(&block)
+       @batch_queue.each(&block)
+     end
+
+     ##
+     # Queue size
+     #
+     def length
+       @batch_queue.length
+     end
+   end
+ end
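
A minimal usage sketch of the queue-bounded batching this class enables, assuming it is driven through Cassandra#batch as in the hunk below; the connection and column family mirror the test schema above, and the threshold of 5 is arbitrary.

twitter = Cassandra.new('Twitter', '127.0.0.1:9160')

twitter.batch(:queue_size => 5) do
  10.times do |i|
    # Each insert is queued on the Cassandra::Batch; once five mutations are
    # queued, Batch#<< calls flush_batch and clears the queue. Whatever is
    # left over is flushed when the block closes.
    twitter.insert(:Users, "user#{i}", {'screen_name' => "user#{i}"})
  end
end
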
@@ -839,19 +839,31 @@ class Cassandra
  
  ##
  # Open a batch operation and yield self. Inserts and deletes will be queued
- # until the block closes, and then sent atomically to the server.
+ # until the block closes or the queue is full (if option :queue_size is set),
+ # and then sent atomically to the server.
  #
  # Supports the :consistency option, which overrides the consistency set in
  # the individual commands.
  #
  def batch(options = {})
+   @batch = Cassandra::Batch.new(self, options)
+
    _, _, _, options =
      extract_and_validate_params(schema.cf_defs.first.name, "", [options], WRITE_DEFAULTS)
  
-   @batch = []
-   yield(self)
-   compacted_map,seen_clevels = compact_mutations!
-   clevel = if options[:consistency] != nil # Override any clevel from individual mutations if
+   yield(self)
+   flush_batch(options)
+ ensure
+   @batch = nil
+ end
+
+ ##
+ # Send the batch queue to the server
+ #
+ def flush_batch(options)
+   compacted_map,seen_clevels = compact_mutations!
+
+   clevel = if options[:consistency] != nil # Override any clevel from individual mutations if
               options[:consistency]
             elsif seen_clevels.length > 1 # Cannot choose which CLevel to use if there are several ones
               raise "Multiple consistency levels used in the batch, and no override...cannot pick one"
@@ -859,11 +871,9 @@ class Cassandra
               seen_clevels.first
             end
  
-   _mutate(compacted_map,clevel)
- ensure
-   @batch = nil
+   _mutate(compacted_map,clevel)
  end
-
+
  ##
  # Create secondary index.
  #
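
A hedged sketch of exercising the reworked batch/flush_batch pair with a batch-wide consistency override. The keys and values are illustrative, and Cassandra::Consistency::QUORUM is assumed to be available alongside the ONE level used in the tests below.

# The batch-level :consistency wins over whatever the individual mutations carry;
# combined with :queue_size, each intermediate flush is sent with that level too.
@twitter.batch(:consistency => Cassandra::Consistency::QUORUM, :queue_size => 2) do
  @twitter.insert(:Users, 'k1', {'body' => 'v1'})
  @twitter.insert(:Users, 'k2', {'body' => 'v2'})   # queue reaches 2: flush_batch runs here
  @twitter.remove(:Users, 'k0')                     # remaining mutation is flushed when the block closes
end
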
File without changes
File without changes
@@ -74,16 +74,21 @@ class Cassandra
      end
    end
  
- def batch
-   @batch = []
+ def batch(options={})
+   @batch = Cassandra::Batch.new(self, options)
    yield
+   flush_batch(options)
+ ensure
+   @batch = nil
+ end
+
+ def flush_batch(options)
    b = @batch
    @batch = nil
    b.each do |mutation|
      send(*mutation)
    end
- ensure
-   @batch = nil
+   @batch = b
  end
  
  def get(column_family, key, *columns_and_options)
@@ -8,16 +8,16 @@ class CassandraTest < Test::Unit::TestCase
  end
  
  def setup
-   @twitter = Cassandra.new('Twitter', "127.0.0.1:9160", :retries => 2, :connect_timeout => 0.1, :timeout => 5, :exception_classes => [])
+   @twitter = Cassandra.new('Twitter', "127.0.0.1:9160", :retries => 2, :connect_timeout => 1, :timeout => 5, :exception_classes => [])
    @twitter.clear_keyspace!
  
-   @blogs = Cassandra.new('Multiblog', "127.0.0.1:9160", :retries => 2, :connect_timeout => 0.1, :timeout => 5, :exception_classes => [])
+   @blogs = Cassandra.new('Multiblog', "127.0.0.1:9160", :retries => 2, :connect_timeout => 1, :timeout => 5, :exception_classes => [])
    @blogs.clear_keyspace!
  
-   @blogs_long = Cassandra.new('MultiblogLong', "127.0.0.1:9160", :retries => 2, :connect_timeout => 0.1, :timeout => 5, :exception_classes => [])
+   @blogs_long = Cassandra.new('MultiblogLong', "127.0.0.1:9160", :retries => 2, :connect_timeout => 1, :timeout => 5, :exception_classes => [])
    @blogs_long.clear_keyspace!
  
-   @type_conversions = Cassandra.new('TypeConversions', "127.0.0.1:9160", :retries => 2, :connect_timeout => 0.1, :timeout => 5, :exception_classes => [])
+   @type_conversions = Cassandra.new('TypeConversions', "127.0.0.1:9160", :retries => 2, :connect_timeout => 1, :timeout => 5, :exception_classes => [])
    @type_conversions.clear_keyspace!
  
    Cassandra::WRITE_DEFAULTS[:consistency] = Cassandra::Consistency::ONE
@@ -615,6 +615,78 @@ class CassandraTest < Test::Unit::TestCase
  
  end
  
+ def test_batch_queue_size
+   k = key
+
+   @twitter.insert(:Users, k + '0', {'delete_me' => 'v0', 'keep_me' => 'v0'})
+   @twitter.insert(:Users, k + '1', {'body' => 'v1', 'user' => 'v1'})
+   initial_subcolumns = {@uuids[1] => 'v1', @uuids[2] => 'v2'}
+   @twitter.insert(:StatusRelationships, k, {'user_timelines' => initial_subcolumns, 'dummy_supercolumn' => {@uuids[5] => 'value'}})
+   assert_equal(initial_subcolumns, @twitter.get(:StatusRelationships, k, 'user_timelines'))
+   assert_equal({@uuids[5] => 'value'}, @twitter.get(:StatusRelationships, k, 'dummy_supercolumn'))
+   new_subcolumns = {@uuids[3] => 'v3', @uuids[4] => 'v4'}
+   subcolumn_to_delete = initial_subcolumns.keys.first # the first column of the initial set
+
+   @twitter.batch(:queue_size => 5) do
+     # Normal Columns
+     @twitter.insert(:Users, k + '2', {'body' => 'v2', 'user' => 'v2'})
+     @twitter.insert(:Users, k + '3', {'body' => 'bogus', 'user' => 'v3'})
+     @twitter.insert(:Users, k + '3', {'body' => 'v3', 'location' => 'v3'})
+     @twitter.insert(:Statuses, k + '3', {'body' => 'v'})
+
+     assert_equal({'delete_me' => 'v0', 'keep_me' => 'v0'}, @twitter.get(:Users, k + '0')) # Written
+     assert_equal({'body' => 'v1', 'user' => 'v1'}, @twitter.get(:Users, k + '1')) # Written
+     assert_equal({}, @twitter.get(:Users, k + '2')) # Not yet written
+     assert_equal({}, @twitter.get(:Statuses, k + '3')) # Not yet written
+     assert_equal({}, @twitter.get(:UserCounters, 'bob')) if CASSANDRA_VERSION.to_f >= 0.8 # Written
+
+     if CASSANDRA_VERSION.to_f >= 0.8
+       @twitter.add(:UserCounters, 'bob', 5, 'tweet_count')
+     else
+       @twitter.insert(:Users, k + '2', {'body' => 'v2', 'user' => 'v2'})
+     end
+     # Flush!
+
+     assert_equal({'body' => 'v2', 'user' => 'v2'}, @twitter.get(:Users, k + '2')) # Written
+     assert_equal({'body' => 'v3', 'user' => 'v3', 'location' => 'v3'}, @twitter.get(:Users, k + '3')) # Written and compacted
+     assert_equal({'body' => 'v'}, @twitter.get(:Statuses, k + '3')) # Written
+     assert_equal({'tweet_count' => 5}, @twitter.get(:UserCounters, 'bob')) if CASSANDRA_VERSION.to_f >= 0.8 # Written
+
+     @twitter.remove(:Users, k + '1') # Full row
+     @twitter.remove(:Users, k + '0', 'delete_me') # A single column of the row
+     @twitter.remove(:Users, k + '4')
+     @twitter.insert(:Users, k + '4', {'body' => 'v4', 'user' => 'v4'})
+
+     assert_equal({'body' => 'v1', 'user' => 'v1'}, @twitter.get(:Users, k + '1')) # Not yet removed
+     assert_equal({'delete_me' => 'v0', 'keep_me' => 'v0'}, @twitter.get(:Users, k + '0')) # Not yet removed
+     assert_equal({}, @twitter.get(:Users, k + '4')) # Not yet written
+
+     @twitter.insert(:Users, k + '5', {'body' => 'v5', 'user' => 'v5'})
+     # Flush!
+
+     assert_equal({'body' => 'v4', 'user' => 'v4'}, @twitter.get(:Users, k + '4')) # Written
+     assert_equal({}, @twitter.get(:Users, k + '1')) # Removed
+     assert_equal({ 'keep_me' => 'v0'}, @twitter.get(:Users, k + '0')) # 'delete_me' column removed
+
+     assert_equal({'body' => 'v2', 'user' => 'v2'}.keys.sort, @twitter.get(:Users, k + '2').timestamps.keys.sort) # Written
+     assert_equal({'body' => 'v3', 'user' => 'v3', 'location' => 'v3'}.keys.sort, @twitter.get(:Users, k + '3').timestamps.keys.sort) # Written and compacted
+     assert_equal({'body' => 'v4', 'user' => 'v4'}.keys.sort, @twitter.get(:Users, k + '4').timestamps.keys.sort) # Written
+     assert_equal({'body' => 'v'}.keys.sort, @twitter.get(:Statuses, k + '3').timestamps.keys.sort) # Written
+
+     # SuperColumns
+     # Add and delete new sub columns to the user timeline supercolumn
+     @twitter.insert(:StatusRelationships, k, {'user_timelines' => new_subcolumns })
+     @twitter.remove(:StatusRelationships, k, 'user_timelines' , subcolumn_to_delete ) # Delete the first of the initial_subcolumns from the user_timeline supercolumn
+     # Delete a complete supercolumn
+     @twitter.remove(:StatusRelationships, k, 'dummy_supercolumn' ) # Delete the full dummy supercolumn
+   end
+
+   # Final result: initial_subcolumns - initial_subcolumns.first + new_subcolumns
+   resulting_subcolumns = initial_subcolumns.merge(new_subcolumns).reject{|k2,v| k2 == subcolumn_to_delete }
+   assert_equal(resulting_subcolumns, @twitter.get(:StatusRelationships, key, 'user_timelines'))
+   assert_equal({}, @twitter.get(:StatusRelationships, key, 'dummy_supercolumn')) # dummy supercolumn deleted
+ end
+
  def test_complain_about_nil_key
    assert_raises(ArgumentError) do
      @twitter.insert(:Statuses, nil, {'text' => 'crap'})