wukong 1.5.3 → 1.5.4

Files changed (43)
  1. data/CHANGELOG.textile +4 -0
  2. data/bin/hdp-bin +44 -0
  3. data/bin/hdp-ls +2 -1
  4. data/docpages/avro/performance.textile +36 -0
  5. data/examples/cassandra_streaming/avromapper.rb +85 -0
  6. data/examples/cassandra_streaming/berlitz_for_cassandra.textile +22 -0
  7. data/examples/cassandra_streaming/cassandra.avpr +468 -0
  8. data/examples/cassandra_streaming/cassandra_random_partitioner.rb +62 -0
  9. data/examples/cassandra_streaming/catter.sh +45 -0
  10. data/examples/cassandra_streaming/client_interface_notes.textile +200 -0
  11. data/examples/cassandra_streaming/client_schema.avpr +211 -0
  12. data/examples/cassandra_streaming/client_schema.textile +318 -0
  13. data/examples/cassandra_streaming/foofile.avr +0 -0
  14. data/examples/cassandra_streaming/pymap.sh +1 -0
  15. data/examples/cassandra_streaming/pyreduce.sh +1 -0
  16. data/examples/cassandra_streaming/smutation.avpr +188 -0
  17. data/examples/cassandra_streaming/streamer.sh +51 -0
  18. data/examples/cassandra_streaming/struct_loader.rb +24 -0
  19. data/examples/cassandra_streaming/tuning.textile +73 -0
  20. data/examples/emr/README-elastic_map_reduce.textile +26 -0
  21. data/examples/emr/dot_wukong_dir/credentials.json +7 -0
  22. data/examples/emr/{emr.yaml → dot_wukong_dir/emr.yaml} +33 -16
  23. data/{bin/bootstrap.sh → examples/emr/dot_wukong_dir/emr_bootstrap.sh} +1 -1
  24. data/examples/emr/elastic_mapreduce_example.rb +1 -0
  25. data/lib/wukong/encoding/asciize.rb +108 -0
  26. data/lib/wukong/extensions/date_time.rb +33 -7
  27. data/lib/wukong/extensions/emittable.rb +12 -25
  28. data/lib/wukong/extensions/hash_like.rb +13 -6
  29. data/lib/wukong/filename_pattern.rb +8 -7
  30. data/lib/wukong/schema.rb +47 -0
  31. data/lib/wukong/script.rb +7 -0
  32. data/lib/wukong/script/cassandra_loader_script.rb +40 -0
  33. data/lib/wukong/script/emr_command.rb +74 -43
  34. data/lib/wukong/script/hadoop_command.rb +89 -72
  35. data/lib/wukong/store.rb +2 -7
  36. data/lib/wukong/store/cassandra.rb +10 -0
  37. data/lib/wukong/store/cassandra/streaming.rb +75 -0
  38. data/lib/wukong/store/cassandra/struct_loader.rb +21 -0
  39. data/lib/wukong/store/cassandra_model.rb +90 -0
  40. data/lib/wukong/store/chh_chunked_flat_file_store.rb +1 -1
  41. data/lib/wukong/store/chunked_flat_file_store.rb +24 -20
  42. data/wukong.gemspec +32 -4
  43. metadata +33 -14
data/CHANGELOG.textile CHANGED
@@ -1,3 +1,7 @@
+ h2. Wukong v1.5.4
+
+ * EMR support now works very well
+
  h2. Wukong v1.5.3
 
  * A couple of bugfixes. Sorry about that.
data/bin/hdp-bin ADDED
@@ -0,0 +1,44 @@
+ #!/usr/bin/env ruby
+
+ require 'rubygems'
+ require 'wukong'
+ require 'wukong/streamer/count_keys'
+
+ #
+ # Run locally for testing:
+ #
+ # hdp-cat /hdfs/sometable.tsv | head -n100 | ./hdp-bin --column=4 --bin_width=0.1 --map | sort | ./hdp-bin --reduce
+ #
+ # Run on a giant dataset:
+ #
+ # hdp-bin --run --column=4 --bin_width=0.1 /hdfs/sometable.tsv /hdfs/sometable_col4_binned
+ #
+
+ Settings.define :column, :default => 1, :type => Integer, :description => "The column to bin"
+ Settings.define :bin_width, :default => 0.5, :type => Float, :description => "What should the bin width be?"
+
+ module HadoopBinning
+
+   class Mapper < Wukong::Streamer::RecordStreamer
+
+     def initialize *args
+       super(*args)
+       @bin_width = options.bin_width
+       @column = options.column
+     end
+
+     def process *args
+       yield bin_field(args[@column])
+     end
+
+     def bin_field field
+       (field.to_f/@bin_width).round*@bin_width
+     end
+
+   end
+
+   class Reducer < Wukong::Streamer::CountKeys; end
+
+ end
+
+ Wukong::Script.new(HadoopBinning::Mapper, HadoopBinning::Reducer).run
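For reference, the bin_field arithmetic above snaps each value to the nearest multiple of the bin width. A minimal standalone sketch of that rounding, runnable outside Wukong (the sample values are made up):

  # Same rounding as HadoopBinning::Mapper#bin_field: snap to the nearest bin_width multiple.
  def bin(value, bin_width)
    (value.to_f / bin_width).round * bin_width
  end

  bin(0.437, 0.1)  # => 0.4
  bin(3.26,  0.5)  # => 3.5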
data/bin/hdp-ls CHANGED
@@ -7,4 +7,5 @@ else
  action=ls
  fi
 
- exec hadoop dfs -$action "$@"
+ HADOOP_HOME=${HADOOP_HOME-/usr/lib/hadoop}
+ exec $HADOOP_HOME/bin/hadoop dfs -$action "$@"
data/docpages/avro/performance.textile ADDED
@@ -0,0 +1,36 @@
+
+
+ h2. Bulk Streaming use cases
+
+ * Take a bunch of nightly calculations and need to flood it into the DB -- http://sna-projects.com/blog/2009/06/building-a-1-tb-data-cycle-at-linkedin-with-hadoop-and-project-voldemort/ In this case, it's important that the bulk load happen efficiently but with low stress on the DB. I'm willing to make it so that data streams to each cassandra node in the sort order and pre-partitioned just like the node wants to use it. Should run at the full streaming speed of the disk.
+
+ * Building a new table or moving a legacy database over to cassandra. I want to write from one or several nodes, probably not in the cluster, and data will be completely unpartitioned. I might be able to make some guarantees about uniqueness of keys and rows (that is, you'll generally only see a key once, and/or when you see a key it will contain the entire row). 20k inserts/s per receiving node.
+
+ * Using cassandra to replace HDFS. Replication is for compute, not for availability -- so efficient writing at consistency level ANY is important. Would like to get 100k inserts/s per receiving node.
+
+ * A brand new user wants to just stuff his goddamn data into the goddamn database and start playing with it. It had better be not-terribly-slow, and it had better be really easy to take whatever insane format it shows up in and cram that into the data hole. It should also be conceptually straightforward: it should look like I'm writing hashes or hashes of hashes.
+
+
+ ===========================================================================
+ From http://sna-projects.com/blog/2009/06/building-a-1-tb-data-cycle-at-linkedin-with-hadoop-and-project-voldemort/
+
+ Here are the times taken:
+
+ * 100GB: 28mins (400 mappers, 90 reducers)
+ * 512GB: 2hrs, 16mins (2313 mappers, 350 reducers)
+ * 1TB: 5hrs, 39mins (4608 mappers, 700 reducers)
+
+ Data transfer between the clusters happens at a steady rate bound by the disk or network. For our Amazon instances this is around 40MB/second.
+
+ Online Performance
+
+ Lookup time for a single Voldemort node compares well to a single MySQL instance. To test this we ran local tests against the 100GB per-node data from the 1 TB test, on an Amazon Extra Large instance with 15GB of RAM and the 4 ephemeral disks in a RAID 10 configuration. We simulated 1 million requests from a real request stream recorded on our production system against each of the storage systems. We see the following performance for 1 million requests against a single node:
+
+ |_. |_. MySQL |_. Voldemort |
+ | Reqs per sec. | 727 | 1291 |
+ | Median req. time | 0.23 ms | 0.05 ms |
+ | Avg. req. time | 13.7 ms | 7.7 ms |
+ | 99th percentile req. time | 127.2 ms | 100.7 ms |
+
+ These numbers are both for local requests with no network involved, as the only intention is to benchmark the storage layer of these systems.
data/examples/cassandra_streaming/avromapper.rb ADDED
@@ -0,0 +1,85 @@
+ #!/usr/bin/env ruby
+
+ # To install avro gem
+ # cd avro/lang/ruby ; gem package ; sudo gem install pkg/avro-1.4.0.pre1.gem
+
+ require 'rubygems'
+ require 'avro'
+ require 'wukong'
+ require 'wukong/periodic_monitor'
+
+ Settings.define :cassandra_avro_schema, :default => ('/usr/local/share/cassandra/interface/avro/cassandra.avpr')
+ Settings.define :cassandra_thrift_uri, :default => `hostname`.chomp.strip+':9160'
+ Settings.define :log_interval, :default => 10_000
+
+ class AvroStreamer < Wukong::Streamer::RecordStreamer
+   def initialize *args
+     super(*args)
+     @writer = SmutWriter.new
+     @log = PeriodicMonitor.new
+   end
+
+   def process word, count, *_
+     @writer.write_directly(word, 'count', count)
+     @log.periodically( word, count )
+   end
+ end
+
+ class SmutWriter
+   # Reads in the protocol schema
+   # creates the necessary encoder and writer.
+   def initialize
+     schema_file = Settings.cassandra_avro_schema
+     @proto = Avro::Protocol.parse(File.read(schema_file))
+     @schema = @proto.types.detect{|schema| schema.name == 'StreamingMutation'}
+     @enc = Avro::IO::BinaryEncoder.new($stdout)
+     @writer = Avro::IO::DatumWriter.new(@schema)
+   end
+
+   # Directly write the simplified StreamingMutation schema; uses patch from @stuhood
+   def write_directly key, col_name, value
+     @enc.write_bytes(key)
+     @enc.write_bytes(col_name)
+     @enc.write_bytes(value)
+     @enc.write_long(Time.epoch_microseconds)
+     @enc.write_int(0)
+   end
+
+   # Write using the datumwriter
+   def write key, col_name, value
+     @writer.write(smutation(key, col_name, value), @enc)
+   end
+
+   # Simplified StreamingMutation schema uses patch from @stuhood
+   def smutation key, name, value
+     {
+       'key' => key,
+       'name' => name.to_s,
+       'value' => value.to_s,
+       'timestamp' => Time.epoch_microseconds,
+       'ttl' => 0
+     }
+   end
+
+   # The StreamingMutation schema defined in trunk.
+   # Becomes monstrously inefficient due to implementation of unions.
+   def smutation_from_trunk key, name, value
+     {
+       'key' => key,
+       'mutation' => { 'column_or_supercolumn' => { 'column' => {
+         'name' => name.to_s,
+         'value' => value.to_s,
+         'clock' => { 'timestamp' => Time.epoch_microseconds },
+         'ttl' => 0
+       }}}
+     }
+   end
+ end
+
+ Time.class_eval do
+   def self.epoch_microseconds
+     (Time.now.utc.to_i * 1_000_000)
+   end
+ end
+
+ Wukong::Script.new(AvroStreamer, nil, :map_speculative => false).run
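The SmutWriter above uses the low-level Avro BinaryEncoder rather than the DatumWriter so it can skip the union bookkeeping it complains about. As a rough illustration of what write_directly emits, here is a sketch that writes one record into a StringIO instead of $stdout so the bytes can be inspected (the sample key/value and the StringIO target are mine, not part of the gem; assumes the avro gem is installed):

  require 'rubygems'
  require 'avro'
  require 'stringio'

  buf = StringIO.new
  enc = Avro::IO::BinaryEncoder.new(buf)

  # Same field order as SmutWriter#write_directly: key, name, value, timestamp, ttl
  enc.write_bytes('a_row_key')
  enc.write_bytes('count')
  enc.write_bytes('42')
  enc.write_long(Time.now.utc.to_i * 1_000_000)
  enc.write_int(0)

  buf.string.bytesize  # a few length-prefixed byte strings plus two zig-zag varints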
data/examples/cassandra_streaming/berlitz_for_cassandra.textile ADDED
@@ -0,0 +1,22 @@
+
+
+
+ * concurrent_reads, concurrent_writes
+
+ * ColumnFamily, SuperColumnFamily
+
+ * Keyspace
+
+ create temp table uncommonplaces (
+   id integer primary key asc,
+   nvisits integer)
+ ;
+
+
+ mmap means index data files and index files
+ mmap in cassandra consumes up to 2GB
+
+ the data we store in the ring is immutable (most of it) or live but independently replay-able (some of it). If I lost say an hour of writes, that's not such a big deal; I could re-play or loa. Is it safe
+
+
+ * Don't use the cli. Use Ruby (irb) or Python
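In the spirit of that last note, a minimal irb session against a local node, assuming the era-appropriate cassandra rubygem and the stock 'Keyspace1'/'Standard1' column family from the default config; none of this is part of the gem:

  require 'rubygems'
  require 'cassandra'

  # Connect to a local Cassandra node over thrift and write/read one column.
  client = Cassandra.new('Keyspace1', '127.0.0.1:9160')
  client.insert(:Standard1, 'a_row_key', { 'count' => '42' })
  client.get(:Standard1, 'a_row_key')   # => {"count"=>"42"}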
data/examples/cassandra_streaming/cassandra.avpr ADDED
@@ -0,0 +1,468 @@
+ {
+ "protocol" : "Cassandra",
+ "namespace" : "org.apache.cassandra.avro",
+ "types" : [ {
+ "type" : "enum",
+ "name" : "AccessLevel",
+ "symbols" : [ "NONE", "READONLY", "READWRITE", "FULL" ]
+ }, {
+ "type" : "record",
+ "name" : "ColumnPath",
+ "fields" : [ {
+ "name" : "column_family",
+ "type" : "string"
+ }, {
+ "name" : "super_column",
+ "type" : [ "bytes", "null" ]
+ }, {
+ "name" : "column",
+ "type" : [ "bytes", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "ColumnParent",
+ "fields" : [ {
+ "name" : "column_family",
+ "type" : "string"
+ }, {
+ "name" : "super_column",
+ "type" : [ "bytes", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "Clock",
+ "fields" : [ {
+ "name" : "timestamp",
+ "type" : "long"
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "Column",
+ "fields" : [ {
+ "name" : "name",
+ "type" : "bytes"
+ }, {
+ "name" : "value",
+ "type" : "bytes"
+ }, {
+ "name" : "clock",
+ "type" : "Clock"
+ }, {
+ "name" : "ttl",
+ "type" : [ "int", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "SuperColumn",
+ "fields" : [ {
+ "name" : "name",
+ "type" : "bytes"
+ }, {
+ "name" : "columns",
+ "type" : {
+ "type" : "array",
+ "items" : "Column"
+ }
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "ColumnOrSuperColumn",
+ "fields" : [ {
+ "name" : "column",
+ "type" : [ "Column", "null" ]
+ }, {
+ "name" : "super_column",
+ "type" : [ "SuperColumn", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "SliceRange",
+ "fields" : [ {
+ "name" : "start",
+ "type" : "bytes"
+ }, {
+ "name" : "finish",
+ "type" : "bytes"
+ }, {
+ "name" : "reversed",
+ "type" : "boolean"
+ }, {
+ "name" : "count",
+ "type" : "int"
+ }, {
+ "name" : "bitmasks",
+ "type" : [ {
+ "type" : "array",
+ "items" : "bytes"
+ }, "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "SlicePredicate",
+ "fields" : [ {
+ "name" : "column_names",
+ "type" : [ {
+ "type" : "array",
+ "items" : "bytes"
+ }, "null" ]
+ }, {
+ "name" : "slice_range",
+ "type" : [ "SliceRange", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "Deletion",
+ "fields" : [ {
+ "name" : "clock",
+ "type" : "Clock"
+ }, {
+ "name" : "super_column",
+ "type" : [ "bytes", "null" ]
+ }, {
+ "name" : "predicate",
+ "type" : [ "SlicePredicate", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "Mutation",
+ "fields" : [ {
+ "name" : "column_or_supercolumn",
+ "type" : [ "ColumnOrSuperColumn", "null" ]
+ }, {
+ "name" : "deletion",
+ "type" : [ "Deletion", "null" ]
+ } ]
+ }, {
+ "type" : "enum",
+ "name" : "IndexType",
+ "symbols" : [ "KEYS" ]
+ }, {
+ "type" : "record",
+ "name" : "ColumnDef",
+ "fields" : [ {
+ "name" : "name",
+ "type" : "bytes"
+ }, {
+ "name" : "validation_class",
+ "type" : "string"
+ }, {
+ "name" : "index_type",
+ "type" : [ "IndexType", "null" ]
+ }, {
+ "name" : "index_name",
+ "type" : [ "string", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "CfDef",
+ "fields" : [ {
+ "name" : "keyspace",
+ "type" : "string"
+ }, {
+ "name" : "name",
+ "type" : "string"
+ }, {
+ "name" : "column_type",
+ "type" : [ "string", "null" ]
+ }, {
+ "name" : "clock_type",
+ "type" : [ "string", "null" ]
+ }, {
+ "name" : "comparator_type",
+ "type" : [ "string", "null" ]
+ }, {
+ "name" : "subcomparator_type",
+ "type" : [ "string", "null" ]
+ }, {
+ "name" : "reconciler",
+ "type" : [ "string", "null" ]
+ }, {
+ "name" : "comment",
+ "type" : [ "string", "null" ]
+ }, {
+ "name" : "row_cache_size",
+ "type" : [ "double", "null" ]
+ }, {
+ "name" : "preload_row_cache",
+ "type" : [ "boolean", "null" ]
+ }, {
+ "name" : "key_cache_size",
+ "type" : [ "double", "null" ]
+ }, {
+ "name" : "read_repair_chance",
+ "type" : [ "double", "null" ]
+ }, {
+ "name" : "gc_grace_seconds",
+ "type" : [ "int", "null" ]
+ }, {
+ "name" : "column_metadata",
+ "type" : [ {
+ "type" : "array",
+ "items" : "ColumnDef"
+ }, "null" ]
+ }, {
+ "name" : "id",
+ "type" : [ "int", "null" ]
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "KsDef",
+ "fields" : [ {
+ "name" : "name",
+ "type" : "string"
+ }, {
+ "name" : "strategy_class",
+ "type" : "string"
+ }, {
+ "name" : "strategy_options",
+ "type" : [ {
+ "type" : "map",
+ "values" : "string"
+ }, "null" ]
+ }, {
+ "name" : "replication_factor",
+ "type" : "int"
+ }, {
+ "name" : "cf_defs",
+ "type" : {
+ "type" : "array",
+ "items" : "CfDef"
+ }
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "StreamingMutation",
+ "fields" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "name",
+ "type" : "bytes"
+ }, {
+ "name" : "value",
+ "type" : "bytes"
+ }, {
+ "name" : "timestamp",
+ "type" : "long"
+ }, {
+ "name" : "ttl",
+ "type" : "int"
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "MutationsMapEntry",
+ "fields" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "mutations",
+ "type" : {
+ "type" : "map",
+ "values" : {
+ "type" : "array",
+ "items" : "Mutation"
+ }
+ }
+ } ]
+ }, {
+ "type" : "record",
+ "name" : "CoscsMapEntry",
+ "fields" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "columns",
+ "type" : {
+ "type" : "array",
+ "items" : "ColumnOrSuperColumn"
+ }
+ } ]
+ }, {
+ "type" : "enum",
+ "name" : "ConsistencyLevel",
+ "symbols" : [ "ZERO", "ONE", "QUORUM", "DCQUORUM", "DCQUORUMSYNC", "ALL" ]
+ }, {
+ "type" : "error",
+ "name" : "InvalidRequestException",
+ "fields" : [ {
+ "name" : "why",
+ "type" : [ "string", "null" ]
+ } ]
+ }, {
+ "type" : "error",
+ "name" : "NotFoundException",
+ "fields" : [ {
+ "name" : "why",
+ "type" : [ "string", "null" ]
+ } ]
+ }, {
+ "type" : "error",
+ "name" : "UnavailableException",
+ "fields" : [ {
+ "name" : "why",
+ "type" : [ "string", "null" ]
+ } ]
+ }, {
+ "type" : "error",
+ "name" : "TimedOutException",
+ "fields" : [ {
+ "name" : "why",
+ "type" : [ "string", "null" ]
+ } ]
+ } ],
+ "messages" : {
+ "get" : {
+ "request" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "column_path",
+ "type" : "ColumnPath"
+ }, {
+ "name" : "consistency_level",
+ "type" : "ConsistencyLevel"
+ } ],
+ "response" : "ColumnOrSuperColumn",
+ "errors" : [ "InvalidRequestException", "NotFoundException", "UnavailableException", "TimedOutException" ]
+ },
+ "get_slice" : {
+ "request" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "column_parent",
+ "type" : "ColumnParent"
+ }, {
+ "name" : "predicate",
+ "type" : "SlicePredicate"
+ }, {
+ "name" : "consistency_level",
+ "type" : "ConsistencyLevel"
+ } ],
+ "response" : {
+ "type" : "array",
+ "items" : "ColumnOrSuperColumn"
+ },
+ "errors" : [ "InvalidRequestException", "UnavailableException", "TimedOutException" ]
+ },
+ "multiget_slice" : {
+ "request" : [ {
+ "name" : "keys",
+ "type" : {
+ "type" : "array",
+ "items" : "bytes"
+ }
+ }, {
+ "name" : "column_parent",
+ "type" : "ColumnParent"
+ }, {
+ "name" : "predicate",
+ "type" : "SlicePredicate"
+ }, {
+ "name" : "consistency_level",
+ "type" : "ConsistencyLevel"
+ } ],
+ "response" : {
+ "type" : "array",
+ "items" : "CoscsMapEntry"
+ },
+ "errors" : [ "InvalidRequestException", "UnavailableException", "TimedOutException" ]
+ },
+ "get_count" : {
+ "request" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "column_parent",
+ "type" : "ColumnParent"
+ }, {
+ "name" : "predicate",
+ "type" : "SlicePredicate"
+ }, {
+ "name" : "consistency_level",
+ "type" : "ConsistencyLevel"
+ } ],
+ "response" : "int",
+ "errors" : [ "InvalidRequestException", "UnavailableException", "TimedOutException" ]
+ },
+ "insert" : {
+ "request" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "column_parent",
+ "type" : "ColumnParent"
+ }, {
+ "name" : "column",
+ "type" : "Column"
+ }, {
+ "name" : "consistency_level",
+ "type" : "ConsistencyLevel"
+ } ],
+ "response" : "null",
+ "errors" : [ "InvalidRequestException", "UnavailableException", "TimedOutException" ]
+ },
+ "remove" : {
+ "request" : [ {
+ "name" : "key",
+ "type" : "bytes"
+ }, {
+ "name" : "column_path",
+ "type" : "ColumnPath"
+ }, {
+ "name" : "clock",
+ "type" : "Clock"
+ }, {
+ "name" : "consistency_level",
+ "type" : "ConsistencyLevel"
+ } ],
+ "response" : "null",
+ "errors" : [ "InvalidRequestException", "UnavailableException", "TimedOutException" ]
+ },
+ "batch_mutate" : {
+ "request" : [ {
+ "name" : "mutation_map",
+ "type" : {
+ "type" : "array",
+ "items" : "MutationsMapEntry"
+ }
+ }, {
+ "name" : "consistency_level",
+ "type" : "ConsistencyLevel"
+ } ],
+ "response" : "null",
+ "errors" : [ "InvalidRequestException", "UnavailableException", "TimedOutException" ]
+ },
+ "system_add_keyspace" : {
+ "request" : [ {
+ "name" : "ks_def",
+ "type" : "KsDef"
+ } ],
+ "response" : "null",
+ "errors" : [ "InvalidRequestException" ]
+ },
+ "set_keyspace" : {
+ "request" : [ {
+ "name" : "keyspace",
+ "type" : "string"
+ } ],
+ "response" : "null",
+ "errors" : [ "InvalidRequestException" ]
+ },
+ "describe_keyspaces" : {
+ "request" : [ ],
+ "response" : {
+ "type" : "array",
+ "items" : "string"
+ }
+ },
+ "describe_cluster_name" : {
+ "request" : [ ],
+ "response" : "string"
+ },
+ "describe_version" : {
+ "request" : [ ],
+ "response" : "string"
+ }
+ }
+ }
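A quick way to sanity-check the vendored protocol file from irb; this snippet is not part of the gem, and it assumes the avro gem plus the file path shown in the listing above:

  require 'rubygems'
  require 'avro'

  # Parse the protocol and list its named types and RPC messages.
  proto = Avro::Protocol.parse(File.read('data/examples/cassandra_streaming/cassandra.avpr'))
  proto.types.map{|t| t.name }   # includes "StreamingMutation" among the named types
  proto.messages.keys            # "get", "get_slice", "batch_mutate", ...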