mcmire-cassandra 0.12.2 → 0.12.3

data/CHANGELOG CHANGED
@@ -1,7 +1,12 @@
+v0.12.3
+- Fix gemspec so that it includes the proper files for the composite column names feature
+
 v0.12.2
 - Respect the start_key in get_range. Resolves Issue #127.
-- Fix issue with differences in gemspec and what is required. Resolves Issue #125.
-- Update to Cassandra 0.8.7 and 1.0.2.
+- Fix issue with differences in gemspec and what is required wrt thrift_client. Resolves Issue #125.
+- Update to Cassandra 0.8.7 and 1.0.6.
+- Add support for timestamps to Cassandra::Mock
+- Add initial support for composite column names
 
 v0.12.1
 - Fix issue with simple_uuid dependency.
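
The headline change is the initial composite column name support. Below is a minimal usage sketch, assuming a local node loaded with the CompositeColumnConversion column family defined in the schema changes further down; the row key, component values, and column value are illustrative only.

    require 'cassandra/1.0'

    client = Cassandra.new('TypeConversions', '127.0.0.1:9160')

    # Build a composite column name whose components match the CF's
    # comparator, CompositeType(IntegerType, UTF8Type). Each component
    # is passed as a packed byte string; [1].pack('N') is a 4-byte
    # big-endian integer, which IntegerType accepts.
    col = Cassandra::Composite.new([1].pack('N'), 'elephant')
    client.insert(:CompositeColumnConversion, 'row_key', { col => 'value' })
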
data/Gemfile ADDED
@@ -0,0 +1,8 @@
+source "http://rubygems.org"
+
+gemspec
+
+group :development do
+  gem 'echoe'
+  gem 'eventmachine'
+end
data/Manifest CHANGED
@@ -1,4 +1,5 @@
 CHANGELOG
+Gemfile
 LICENSE
 Manifest
 README.md
@@ -18,6 +19,11 @@ conf/0.8/cassandra.yaml
 conf/0.8/log4j-server.properties
 conf/0.8/schema.json
 conf/0.8/schema.txt
+conf/1.0/cassandra.in.sh
+conf/1.0/cassandra.yaml
+conf/1.0/log4j-server.properties
+conf/1.0/schema.json
+conf/1.0/schema.txt
 lib/cassandra.rb
 lib/cassandra/0.6.rb
 lib/cassandra/0.6/cassandra.rb
@@ -31,11 +37,16 @@ lib/cassandra/0.8.rb
 lib/cassandra/0.8/cassandra.rb
 lib/cassandra/0.8/columns.rb
 lib/cassandra/0.8/protocol.rb
+lib/cassandra/1.0.rb
+lib/cassandra/1.0/cassandra.rb
+lib/cassandra/1.0/columns.rb
+lib/cassandra/1.0/protocol.rb
 lib/cassandra/array.rb
 lib/cassandra/cassandra.rb
 lib/cassandra/column_family.rb
 lib/cassandra/columns.rb
 lib/cassandra/comparable.rb
+lib/cassandra/composite.rb
 lib/cassandra/constants.rb
 lib/cassandra/debug.rb
 lib/cassandra/helpers.rb
@@ -45,10 +56,12 @@ lib/cassandra/mock.rb
 lib/cassandra/ordered_hash.rb
 lib/cassandra/protocol.rb
 lib/cassandra/time.rb
+mcmire-cassandra.gemspec
 test/cassandra_client_test.rb
 test/cassandra_mock_test.rb
 test/cassandra_test.rb
 test/comparable_types_test.rb
+test/composite_type_test.rb
 test/eventmachine_test.rb
 test/ordered_hash_test.rb
 test/test_helper.rb
@@ -61,3 +74,6 @@ vendor/0.7/gen-rb/cassandra_types.rb
 vendor/0.8/gen-rb/cassandra.rb
 vendor/0.8/gen-rb/cassandra_constants.rb
 vendor/0.8/gen-rb/cassandra_types.rb
+vendor/1.0/gen-rb/cassandra.rb
+vendor/1.0/gen-rb/cassandra_constants.rb
+vendor/1.0/gen-rb/cassandra_types.rb
@@ -61,6 +61,9 @@
     "SuperUUID":{
       "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
       "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
-      "column_type":"Super"}
+      "column_type":"Super"},
+    "CompositeColumnConversion":{
+      "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
+      "column_type":"Standard"}
   }
 }
@@ -48,4 +48,4 @@ create keyspace TypeConversions with
 use TypeConversions;
 create column family UUIDColumnConversion with comparator = TimeUUIDType;
 create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
-
+create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
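
Reading a composite column back goes through the same Composite wrapper. A hypothetical read-back sketch, reusing the same illustrative key and component values as the insert example above:

    require 'cassandra/1.0'

    client = Cassandra.new('TypeConversions', '127.0.0.1:9160')

    # Fetch the single column written in the earlier sketch by rebuilding
    # the same composite column name.
    col = Cassandra::Composite.new([1].pack('N'), 'elephant')
    value = client.get(:CompositeColumnConversion, 'row_key', col)
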
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$CASSANDRA_HOME" = "x" ]; then
+    CASSANDRA_HOME=`dirname $0`/..
+fi
+
+# The directory where Cassandra's configs live (required)
+if [ "x$CASSANDRA_CONF" = "x" ]; then
+    CASSANDRA_CONF=$CASSANDRA_HOME/conf
+fi
+
+# This can be the path to a jar file, or a directory containing the
+# compiled classes. NOTE: This isn't needed by the startup script,
+# it's just used here in constructing the classpath.
+cassandra_bin=$CASSANDRA_HOME/build/classes/main
+cassandra_bin=$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift
+#cassandra_bin=$cassandra_home/build/cassandra.jar
+
+# JAVA_HOME can optionally be set here
+#JAVA_HOME=/usr/local/jdk6
+
+# The java classpath (required)
+CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
+
+for jar in $CASSANDRA_HOME/lib/*.jar; do
+    CLASSPATH=$CLASSPATH:$jar
+done
@@ -0,0 +1,415 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# You should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token: 0
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, hints will be dropped.
+max_hint_window_in_ms: 3600000 # one hour
+# Sleep this long after delivering each row or row fragment
+hinted_handoff_throttle_delay_in_ms: 50
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthority; used to limit access/provide permissions
+authority: org.apache.cassandra.auth.AllowAllAuthority
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster. Any IPartitioner may be used, including your
+# own as long as it is on the classpath. Out of the box, Cassandra
+# provides org.apache.cassandra.dht.RandomPartitioner
+# org.apache.cassandra.dht.ByteOrderedPartitioner,
+# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
+# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
+# (deprecated).
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+#   When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
+#   scanning rows in key order, but the ordering can generate hot spots
+#   for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
+# - keys in a less-efficient format and only works with keys that are
+#   UTF8-encoded Strings.
+# - CollatingOPP colates according to EN,US rules rather than lexical byte
+#   ordering. Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.RandomPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+    - data/data
+
+# commit log
+commitlog_directory: data/commitlog
+
+# saved caches
+saved_caches_directory: data/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points.
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring. You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: "127.0.0.1"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_. Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Buffer size to use when performing contiguous column slices.
+# Increase this to the size of the column slices you typically perform
+sliced_buffer_size_in_kb: 64
+
+# TCP port, for commands and data
+storage_port: 7000
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: localhost
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: localhost
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three options for the RPC Server:
+#
+# sync -> One connection per thread in the rpc pool (see below).
+#         For a very large number of clients, memory will be your limiting
+#         factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
+#         Connection pooling is very, very strongly recommended.
+#
+# async -> Nonblocking server implementation with one thread to serve
+#          rpc connections. This is not recommended for high throughput use
+#          cases. Async has been tested to be about 50% slower than sync
+#          or hsha and is deprecated: it will be removed in the next major release.
+#
+# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
+#         (see below) is used to manage requests, but the threads are multiplexed
+#         across the different clients.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max|thread to set request pool size.
+# You would primarily set max for the sync server to safeguard against
+# misbehaved clients; if you do hit the max, Cassandra will block until one
+# disconnects before accepting more. The defaults for sync are min of 16 and max
+# unlimited.
+#
+# For the Hsha server, the min and max both default to the number of CPU cores.
+#
+# This configuration is ignored by the async server.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+# 0 disables TFramedTransport in favor of TSocket. This option
+# is deprecated; we strongly recommend using Framed mode.
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this account for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# Time to wait for a reply from other nodes before failing the command
+rpc_timeout_in_ms: 10000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch, which will let Cassandra know enough
+# about your network topology to route requests efficiently.
+# Out of the box, Cassandra provides
+# - org.apache.cassandra.locator.SimpleSnitch:
+#   Treats Strategy order as proximity. This improves cache locality
+#   when disabling read repair, which can further improve throughput.
+# - org.apache.cassandra.locator.RackInferringSnitch:
+#   Proximity is determined by rack and data center, which are
+#   assumed to correspond to the 3rd and 2nd octet of each node's
+#   IP address, respectively
+# - org.apache.cassandra.locator.PropertyFileSnitch:
+#   Proximity is determined by rack and data center, which are
+#   explicitly configured in cassandra-topology.properties.
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+#   requests per client. Requests beyond
+#   that limit are queued up until
+#   running requests can complete.
+#   The value of 80 here is twice the number of
+#   concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+#   overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+#   overridden default_weight. The weight translates into how
+#   many requests are handled during each turn of the
+#   RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifer based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primrary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technicial
+# terms, the interval coresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without a impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
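
For reference, the Thrift settings in the yaml above (rpc_address: localhost, rpc_port: 9160) are what a client built on this gem would point at. A hedged connection sketch, assuming a node running with this config and the TypeConversions keyspace from the schema files already created:

    require 'cassandra/1.0'

    # Connect to the Thrift endpoint configured in cassandra.yaml above.
    client = Cassandra.new('TypeConversions', '127.0.0.1:9160')
    puts client.keyspaces.inspect # list the keyspaces known to the node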