cassandra 0.13.0 → 0.14.0
- data/CHANGELOG +15 -0
- data/Gemfile +8 -0
- data/Manifest +18 -0
- data/Rakefile +2 -1
- data/cassandra.gemspec +13 -9
- data/conf/0.6/schema.json +2 -2
- data/conf/0.6/storage-conf.xml +18 -18
- data/conf/0.8/schema.json +3 -0
- data/conf/0.8/schema.txt +7 -1
- data/conf/1.0/cassandra.in.sh +41 -0
- data/conf/1.0/cassandra.yaml +415 -0
- data/conf/1.0/log4j-server.properties +40 -0
- data/conf/1.0/schema.json +72 -0
- data/conf/1.0/schema.txt +57 -0
- data/ext/cassandra_native.c +34 -0
- data/ext/extconf.rb +9 -0
- data/lib/cassandra.rb +1 -0
- data/lib/cassandra/0.6/protocol.rb +10 -9
- data/lib/cassandra/cassandra.rb +32 -25
- data/lib/cassandra/columns.rb +38 -13
- data/lib/cassandra/composite.rb +37 -15
- data/lib/cassandra/dynamic_composite.rb +96 -0
- data/lib/cassandra/mock.rb +23 -14
- data/test/cassandra_mock_test.rb +6 -0
- data/test/cassandra_test.rb +390 -49
- data/test/composite_type_test.rb +42 -7
- metadata +47 -21
data/CHANGELOG
CHANGED
@@ -1,3 +1,18 @@
+v0.14.0
+- Numerous performance improvements (courtesy @nearbuy)
+- Fixed many 0.6 bugs
+- Added batch counter update support to Cassandra::Mock
+- Added support for DynamicComposite columns (issue #154, courtesy @nearbuy)
+- API cleanup in get_indexed_slices (issue #155, courtesy @mcmire)
+
+v0.13.0
+- Support for new thrift gem versions
+- Updated all links to point to new github repo
+- Batch support for counter updates (courtesy @nearbuy)
+- Counter super column support in get and get_range (courtesy @behrangj)
+- Composite columns (courtesy @nearbuy)
+- Gemspec cleanup
+
 v0.12.2
 - Respect the start_key in get_range. Resolves Issue #127.
 - Fix issue with differences in gemspec and what is required. Resolves Issue #125.
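For context on the counter entries above, here is a minimal sketch of what batched counter updates look like with this client. The keyspace, server address, and the UserCounters column family name are illustrative placeholders, not taken from this diff:

    require 'cassandra/1.0'

    # Connect to a keyspace (names here are placeholders).
    client = Cassandra.new('Twitter', '127.0.0.1:9160')

    # Counter increments issued inside a batch block are grouped into a
    # single mutation; this is what the "batch support for counter updates"
    # and the Cassandra::Mock batch-counter entries refer to.
    client.batch do
      client.add(:UserCounters, 'bob', 5, 'tweet_count')  # increment by 5
      client.add(:UserCounters, 'bob', 1, 'login_count')  # increment by 1
    end

Cassandra::Mock can stand in for the real client in tests and, per the 0.14.0 entry, accepts the same batched counter updates.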
data/Gemfile
ADDED
data/Manifest
CHANGED
@@ -1,4 +1,5 @@
 CHANGELOG
+Gemfile
 LICENSE
 Manifest
 README.md
@@ -18,6 +19,13 @@ conf/0.8/cassandra.yaml
 conf/0.8/log4j-server.properties
 conf/0.8/schema.json
 conf/0.8/schema.txt
+conf/1.0/cassandra.in.sh
+conf/1.0/cassandra.yaml
+conf/1.0/log4j-server.properties
+conf/1.0/schema.json
+conf/1.0/schema.txt
+ext/cassandra_native.c
+ext/extconf.rb
 lib/cassandra.rb
 lib/cassandra/0.6.rb
 lib/cassandra/0.6/cassandra.rb
@@ -31,13 +39,19 @@ lib/cassandra/0.8.rb
 lib/cassandra/0.8/cassandra.rb
 lib/cassandra/0.8/columns.rb
 lib/cassandra/0.8/protocol.rb
+lib/cassandra/1.0.rb
+lib/cassandra/1.0/cassandra.rb
+lib/cassandra/1.0/columns.rb
+lib/cassandra/1.0/protocol.rb
 lib/cassandra/array.rb
 lib/cassandra/cassandra.rb
 lib/cassandra/column_family.rb
 lib/cassandra/columns.rb
 lib/cassandra/comparable.rb
+lib/cassandra/composite.rb
 lib/cassandra/constants.rb
 lib/cassandra/debug.rb
+lib/cassandra/dynamic_composite.rb
 lib/cassandra/helpers.rb
 lib/cassandra/keyspace.rb
 lib/cassandra/long.rb
@@ -49,6 +63,7 @@ test/cassandra_client_test.rb
 test/cassandra_mock_test.rb
 test/cassandra_test.rb
 test/comparable_types_test.rb
+test/composite_type_test.rb
 test/eventmachine_test.rb
 test/ordered_hash_test.rb
 test/test_helper.rb
@@ -61,3 +76,6 @@ vendor/0.7/gen-rb/cassandra_types.rb
 vendor/0.8/gen-rb/cassandra.rb
 vendor/0.8/gen-rb/cassandra_constants.rb
 vendor/0.8/gen-rb/cassandra_types.rb
+vendor/1.0/gen-rb/cassandra.rb
+vendor/1.0/gen-rb/cassandra_constants.rb
+vendor/1.0/gen-rb/cassandra_types.rb
data/Rakefile
CHANGED
@@ -6,10 +6,11 @@ unless ENV['FROM_BIN_CASSANDRA_HELPER']
 
 Echoe.new("cassandra") do |p|
   p.author = "Evan Weaver, Ryan King"
-  p.
+  p.url = 'http://github.com/twitter/cassandra'
   p.summary = "A Ruby client for the Cassandra distributed database."
   p.rubygems_version = ">= 0.8"
   p.dependencies = ['thrift_client >=0.7.0 <0.9', 'json', 'rake', 'simple_uuid ~>0.2.0']
+  p.development_dependencies = ['echoe']
   p.ignore_pattern = /^(data|vendor\/cassandra|cassandra|vendor\/thrift|.*\.rbc)/
   p.rdoc_pattern = /^(lib|bin|tasks|ext)|^README|^CHANGELOG|^TODO|^LICENSE|^COPYING$/
   p.retain_gemspec = true
data/cassandra.gemspec
CHANGED
@@ -2,23 +2,24 @@
 
 Gem::Specification.new do |s|
   s.name = "cassandra"
-  s.version = "0.
+  s.version = "0.14.0"
 
   s.required_rubygems_version = Gem::Requirement.new(">= 0.8") if s.respond_to? :required_rubygems_version=
   s.authors = ["Evan Weaver, Ryan King"]
-  s.date = "2012-
+  s.date = "2012-08-06"
   s.description = "A Ruby client for the Cassandra distributed database."
   s.email = ""
   s.executables = ["cassandra_helper"]
-  s.
-  s.
-  s.
+  s.extensions = ["ext/extconf.rb"]
+  s.extra_rdoc_files = ["CHANGELOG", "LICENSE", "README.md", "bin/cassandra_helper", "ext/cassandra_native.c", "ext/extconf.rb", "lib/cassandra.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/0.8.rb", "lib/cassandra/0.8/cassandra.rb", "lib/cassandra/0.8/columns.rb", "lib/cassandra/0.8/protocol.rb", "lib/cassandra/1.0.rb", "lib/cassandra/1.0/cassandra.rb", "lib/cassandra/1.0/columns.rb", "lib/cassandra/1.0/protocol.rb", "lib/cassandra/array.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/column_family.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/composite.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/dynamic_composite.rb", "lib/cassandra/helpers.rb", "lib/cassandra/keyspace.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/protocol.rb", "lib/cassandra/time.rb"]
+  s.files = ["CHANGELOG", "Gemfile", "LICENSE", "Manifest", "README.md", "Rakefile", "bin/cassandra_helper", "conf/0.6/cassandra.in.sh", "conf/0.6/log4j.properties", "conf/0.6/schema.json", "conf/0.6/storage-conf.xml", "conf/0.7/cassandra.in.sh", "conf/0.7/cassandra.yaml", "conf/0.7/log4j-server.properties", "conf/0.7/schema.json", "conf/0.7/schema.txt", "conf/0.8/cassandra.in.sh", "conf/0.8/cassandra.yaml", "conf/0.8/log4j-server.properties", "conf/0.8/schema.json", "conf/0.8/schema.txt", "conf/1.0/cassandra.in.sh", "conf/1.0/cassandra.yaml", "conf/1.0/log4j-server.properties", "conf/1.0/schema.json", "conf/1.0/schema.txt", "ext/cassandra_native.c", "ext/extconf.rb", "lib/cassandra.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/0.8.rb", "lib/cassandra/0.8/cassandra.rb", "lib/cassandra/0.8/columns.rb", "lib/cassandra/0.8/protocol.rb", "lib/cassandra/1.0.rb", "lib/cassandra/1.0/cassandra.rb", "lib/cassandra/1.0/columns.rb", "lib/cassandra/1.0/protocol.rb", "lib/cassandra/array.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/column_family.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/composite.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/dynamic_composite.rb", "lib/cassandra/helpers.rb", "lib/cassandra/keyspace.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/protocol.rb", "lib/cassandra/time.rb", "test/cassandra_client_test.rb", "test/cassandra_mock_test.rb", "test/cassandra_test.rb", "test/comparable_types_test.rb", "test/composite_type_test.rb", "test/eventmachine_test.rb", "test/ordered_hash_test.rb", "test/test_helper.rb", "vendor/0.6/gen-rb/cassandra.rb", "vendor/0.6/gen-rb/cassandra_constants.rb", "vendor/0.6/gen-rb/cassandra_types.rb", "vendor/0.7/gen-rb/cassandra.rb", "vendor/0.7/gen-rb/cassandra_constants.rb", "vendor/0.7/gen-rb/cassandra_types.rb", "vendor/0.8/gen-rb/cassandra.rb", "vendor/0.8/gen-rb/cassandra_constants.rb", "vendor/0.8/gen-rb/cassandra_types.rb", "vendor/1.0/gen-rb/cassandra.rb", "vendor/1.0/gen-rb/cassandra_constants.rb", "vendor/1.0/gen-rb/cassandra_types.rb", "cassandra.gemspec"]
+  s.homepage = "http://github.com/twitter/cassandra"
   s.rdoc_options = ["--line-numbers", "--inline-source", "--title", "Cassandra", "--main", "README.md"]
-  s.require_paths = ["lib"]
-  s.rubyforge_project = "
-  s.rubygems_version = "1.8.
+  s.require_paths = ["lib", "ext"]
+  s.rubyforge_project = "cassandra"
+  s.rubygems_version = "1.8.17"
   s.summary = "A Ruby client for the Cassandra distributed database."
-  s.test_files = ["test/cassandra_mock_test.rb", "test/
+  s.test_files = ["test/cassandra_client_test.rb", "test/cassandra_mock_test.rb", "test/cassandra_test.rb", "test/comparable_types_test.rb", "test/composite_type_test.rb", "test/eventmachine_test.rb", "test/ordered_hash_test.rb", "test/test_helper.rb"]
 
   if s.respond_to? :specification_version then
     s.specification_version = 3
@@ -28,16 +29,19 @@ Gem::Specification.new do |s|
       s.add_runtime_dependency(%q<json>, [">= 0"])
       s.add_runtime_dependency(%q<rake>, [">= 0"])
       s.add_runtime_dependency(%q<simple_uuid>, ["~> 0.2.0"])
+      s.add_development_dependency(%q<echoe>, [">= 0"])
     else
       s.add_dependency(%q<thrift_client>, ["< 0.9", ">= 0.7.0"])
      s.add_dependency(%q<json>, [">= 0"])
      s.add_dependency(%q<rake>, [">= 0"])
      s.add_dependency(%q<simple_uuid>, ["~> 0.2.0"])
+      s.add_dependency(%q<echoe>, [">= 0"])
     end
   else
     s.add_dependency(%q<thrift_client>, ["< 0.9", ">= 0.7.0"])
     s.add_dependency(%q<json>, [">= 0"])
     s.add_dependency(%q<rake>, [">= 0"])
     s.add_dependency(%q<simple_uuid>, ["~> 0.2.0"])
+    s.add_dependency(%q<echoe>, [">= 0"])
   end
 end
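Given the dependency constraints above (thrift_client >= 0.7.0 and < 0.9), a consumer of this release would typically pin the gem and open a connection roughly as in the sketch below. The keyspace name and server address are placeholders; the Statuses column family comes from the bundled test schema:

    # Gemfile
    gem 'cassandra', '~> 0.14.0'   # pulls in thrift_client >= 0.7.0, < 0.9

    # application code
    require 'cassandra/1.0'        # 1.0 protocol files added in this release; 0.6/0.7/0.8 also ship

    client = Cassandra.new('Twitter', '127.0.0.1:9160')
    client.insert(:Statuses, 'k1', { 'body' => 'v1' })
    client.get(:Statuses, 'k1')    # => {"body"=>"v1"}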
data/conf/0.6/schema.json
CHANGED
@@ -5,7 +5,7 @@
   "UserAudits":{
     "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
     "Type":"Standard"},
-  "UserRelationships":{
+  "UserRelationships":{
     "CompareSubcolumnsWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
     "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
     "Type":"Super"},
@@ -22,7 +22,7 @@
     "CompareSubcolumnsWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
     "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
     "Type":"Super"},
-  "
+  "Indexes":{
     "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
     "Type":"Super"},
   "TimelinishThings":{
data/conf/0.6/storage-conf.xml
CHANGED
@@ -21,15 +21,15 @@
   <!-- Basic Configuration -->
   <!--======================================================================-->
 
-  <!--
+  <!--
    ~ The name of this cluster.  This is mainly used to prevent machines in
    ~ one logical cluster from joining another.
   -->
   <ClusterName>Test</ClusterName>
 
   <!--
-   ~ Turn on to make new [non-seed] nodes automatically migrate the right data
-   ~ to themselves.  (If no InitialToken is specified, they will pick one
+   ~ Turn on to make new [non-seed] nodes automatically migrate the right data
+   ~ to themselves.  (If no InitialToken is specified, they will pick one
    ~ such that they will get half the range of the most-loaded node.)
    ~ If a node starts up without bootstrapping, it will mark itself bootstrapped
    ~ so that you can't subsequently accidently bootstrap a node with
@@ -62,8 +62,8 @@
     <ColumnFamily CompareWith="UTF8Type" Name="Usernames" />
     <ColumnFamily CompareWith="UTF8Type" Name="Statuses" />
     <ColumnFamily CompareWith="UTF8Type" Name="StatusAudits" />
-    <ColumnFamily CompareWith="UTF8Type" CompareSubcolumnsWith="TimeUUIDType" ColumnType="Super" Name="StatusRelationships" />
-    <ColumnFamily CompareWith="UTF8Type" ColumnType="Super" Name="
+    <ColumnFamily CompareWith="UTF8Type" CompareSubcolumnsWith="TimeUUIDType" ColumnType="Super" Name="StatusRelationships" />
+    <ColumnFamily CompareWith="UTF8Type" ColumnType="Super" Name="Indexes" />
     <ColumnFamily CompareWith="BytesType" ColumnType="Standard" Name="TimelinishThings" />
 
     <ReplicaPlacementStrategy>org.apache.cassandra.locator.RackUnawareStrategy</ReplicaPlacementStrategy>
@@ -71,7 +71,7 @@
     <EndPointSnitch>org.apache.cassandra.locator.EndPointSnitch</EndPointSnitch>
   </Keyspace>
 
-  <Keyspace Name="Multiblog">
+  <Keyspace Name="Multiblog">
     <KeysCachedFraction>0.01</KeysCachedFraction>
     <ColumnFamily CompareWith="TimeUUIDType" Name="Blogs"/>
     <ColumnFamily CompareWith="TimeUUIDType" Name="Comments"/>
@@ -123,7 +123,7 @@
    ~ Authenticator: any IAuthenticator may be used, including your own as long
    ~ as it is on the classpath.  Out of the box, Cassandra provides
    ~ org.apache.cassandra.auth.AllowAllAuthenticator and,
-   ~ org.apache.cassandra.auth.SimpleAuthenticator
+   ~ org.apache.cassandra.auth.SimpleAuthenticator
    ~ (SimpleAuthenticator uses access.properties and passwd.properties by
    ~ default).
    ~
@@ -153,7 +153,7 @@
    ~ are sent to the node with the "closest" token, so distributing your
    ~ tokens equally along the key distribution space will spread keys
    ~ evenly across your cluster.)  This setting is only checked the first
-   ~ time a node is started.
+   ~ time a node is started.
 
    ~ This can also be useful with RandomPartitioner to force equal spacing
    ~ of tokens around the hash space, especially for clusters with a small
@@ -195,9 +195,9 @@
 
   <!-- Local hosts and ports -->
 
-  <!--
+  <!--
    ~ Address to bind to and tell other nodes to connect to.  You _must_
-   ~ change this if you want multiple nodes to be able to communicate!
+   ~ change this if you want multiple nodes to be able to communicate!
    ~
    ~ Leaving it blank leaves it up to InetAddress.getLocalHost().  This
    ~ will always do the Right Thing *if* the node is properly configured
@@ -219,9 +219,9 @@
   <ThriftAddress>localhost</ThriftAddress>
   <!-- Thrift RPC port (the port clients connect to). -->
   <ThriftPort>9160</ThriftPort>
-  <!--
+  <!--
    ~ Whether or not to use a framed transport for Thrift. If this option
-   ~ is set to true then you must also use a framed transport on the
+   ~ is set to true then you must also use a framed transport on the
    ~ client-side, (framed and non-framed transports are not compatible).
   -->
   <ThriftFramedTransport>false</ThriftFramedTransport>
@@ -245,16 +245,16 @@
 
   <!--
    ~ Buffer size to use when performing contiguous column slices. Increase
-   ~ this to the size of the column slices you typically perform.
-   ~ (Name-based queries are performed with a buffer size of
+   ~ this to the size of the column slices you typically perform.
+   ~ (Name-based queries are performed with a buffer size of
    ~ ColumnIndexSizeInKB.)
   -->
   <SlicedBufferSizeInKB>64</SlicedBufferSizeInKB>
 
   <!--
-   ~ Buffer size to use when flushing memtables to disk. (Only one
+   ~ Buffer size to use when flushing memtables to disk. (Only one
    ~ memtable is ever flushed at a time.)  Increase (decrease) the index
-   ~ buffer size relative to the data buffer if you have few (many)
+   ~ buffer size relative to the data buffer if you have few (many)
    ~ columns per key.  Bigger is only better _if_ your memtables get large
    ~ enough to use the space. (Check in your data directory after your
    ~ app has been running long enough.) -->
@@ -274,7 +274,7 @@
 
   <!--
    ~ Flush memtable after this much data has been inserted, including
-   ~ overwritten data.  There is one memtable per column family, and
+   ~ overwritten data.  There is one memtable per column family, and
    ~ this threshold is based solely on the amount of data stored, not
    ~ actual heap memory usage (there is some overhead in indexing the
    ~ columns).
@@ -339,7 +339,7 @@
    ~ individually).  Reasonable values range from a minimal 0.1 to 10 or
    ~ even more if throughput matters more than latency.
   -->
-  <!-- <CommitLogSyncBatchWindowInMS>1</CommitLogSyncBatchWindowInMS> -->
+  <!-- <CommitLogSyncBatchWindowInMS>1</CommitLogSyncBatchWindowInMS> -->
 
   <!--
    ~ Time to wait before garbage-collection deletion markers.  Set this to
data/conf/0.8/schema.json
CHANGED
@@ -64,6 +64,9 @@
     "column_type":"Super"},
   "CompositeColumnConversion":{
     "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
+    "column_type":"Standard"},
+  "DynamicComposite":{
+    "comparator_type":"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType,i=>org.apache.cassandra.db.marshal.IntegerType)",
     "column_type":"Standard"}
   }
 }
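The DynamicCompositeType comparator added above maps single-letter aliases (s, l, u, and so on) to marshal classes, so each column name carries its own component types. Below is a rough sketch of how the new Cassandra::DynamicComposite class (lib/cassandra/dynamic_composite.rb in this release) might be used as a column name; the constructor argument order and the value packing are assumptions based on the alias table above, not confirmed by this diff:

    require 'cassandra/1.0'

    client = Cassandra.new('TypeConversions', '127.0.0.1:9160')

    # Each component is assumed to be an [alias, value] pair; 's' maps to
    # UTF8Type and 'l' to LongType in the comparator defined above.
    name = Cassandra::DynamicComposite.new(['s', 'user42'], ['l', [1234].pack('Q>')])

    client.insert(:DynamicComposite, 'row1', { name => 'payload' })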
data/conf/0.8/schema.txt
CHANGED
@@ -15,7 +15,11 @@ create column family UserRelationships with
   column_type = 'Super' and
   subcomparator = 'TimeUUIDType';
 create column family Usernames with comparator = 'UTF8Type';
-create column family Statuses
+create column family Statuses
+  with comparator = 'UTF8Type'
+  and column_metadata = [
+    {column_name: 'tags', validation_class: 'BytesType', index_type: 'KEYS'}
+  ];
 create column family StatusAudits with comparator = 'UTF8Type';
 create column family StatusRelationships with
   comparator = 'UTF8Type' and
@@ -49,3 +53,5 @@ use TypeConversions;
 create column family UUIDColumnConversion with comparator = TimeUUIDType;
 create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
 create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
+create column family DynamicComposite with comparator ='DynamicCompositeType
+  (a=>AsciiType,b=>BytesType,i=>IntegerType,x=>LexicalUUIDType,l=>LongType,t=>TimeUUIDType,s=>UTF8Type,u=>UUIDType)';
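The Statuses definition above adds a KEYS index on the 'tags' column, which is the kind of query the get_indexed_slices cleanup in the changelog touches. A rough sketch of a lookup against that index follows; the hash-based expression form is an assumption about the cleaned-up 0.14.0 API, so the exact signature and return shape may differ:

    require 'cassandra/1.0'

    client = Cassandra.new('Twitter', '127.0.0.1:9160')
    client.insert(:Statuses, 'row1', { 'body' => 'hello', 'tags' => 'ruby' })

    # Fetch rows whose indexed 'tags' column equals 'ruby'.
    # (Assumed expression format for the post-cleanup API.)
    rows = client.get_indexed_slices(:Statuses,
      [{ :column_name => 'tags', :value => 'ruby', :comparison => '==' }])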
data/conf/1.0/cassandra.in.sh
ADDED
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$CASSANDRA_HOME" = "x" ]; then
+    CASSANDRA_HOME=`dirname $0`/..
+fi
+
+# The directory where Cassandra's configs live (required)
+if [ "x$CASSANDRA_CONF" = "x" ]; then
+    CASSANDRA_CONF=$CASSANDRA_HOME/conf
+fi
+
+# This can be the path to a jar file, or a directory containing the
+# compiled classes. NOTE: This isn't needed by the startup script,
+# it's just used here in constructing the classpath.
+cassandra_bin=$CASSANDRA_HOME/build/classes/main
+cassandra_bin=$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift
+#cassandra_bin=$cassandra_home/build/cassandra.jar
+
+# JAVA_HOME can optionally be set here
+#JAVA_HOME=/usr/local/jdk6
+
+# The java classpath (required)
+CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
+
+for jar in $CASSANDRA_HOME/lib/*.jar; do
+    CLASSPATH=$CLASSPATH:$jar
+done
data/conf/1.0/cassandra.yaml
ADDED
@@ -0,0 +1,415 @@
+# Cassandra storage config YAML
+
+# NOTE:
+#   See http://wiki.apache.org/cassandra/StorageConfiguration for
+#   full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# You should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node.  If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token: 0
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated.  After it has been dead this long, hints will be dropped.
+max_hint_window_in_ms: 3600000 # one hour
+# Sleep this long after delivering each row or row fragment
+hinted_handoff_throttle_delay_in_ms: 50
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthority; used to limit access/provide permissions
+authority: org.apache.cassandra.auth.AllowAllAuthority
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster.  Any IPartitioner may be used, including your
+# own as long as it is on the classpath.  Out of the box, Cassandra
+# provides org.apache.cassandra.dht.RandomPartitioner
+# org.apache.cassandra.dht.ByteOrderedPartitioner,
+# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
+# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
+# (deprecated).
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+#   When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes.  BOP allows
+#   scanning rows in key order, but the ordering can generate hot spots
+#   for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
+# - keys in a less-efficient format and only works with keys that are
+#   UTF8-encoded Strings.
+# - CollatingOPP colates according to EN,US rules rather than lexical byte
+#   ordering.  Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.RandomPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+    - data/data
+
+# commit log
+commitlog_directory: data/commitlog
+
+# saved caches
+saved_caches_directory: data/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk.  It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points.
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring.  You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          # Ex: "<ip1>,<ip2>,<ip3>"
+          - seeds: "127.0.0.1"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_.  Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable.  Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables.  Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads.  These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread.  At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Buffer size to use when performing contiguous column slices.
+# Increase this to the size of the column slices you typically perform
+sliced_buffer_size_in_kb: 64
+
+# TCP port, for commands and data
+storage_port: 7000
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: localhost
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: localhost
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three options for the RPC Server:
+#
+# sync  -> One connection per thread in the rpc pool (see below).
+#          For a very large number of clients, memory will be your limiting
+#          factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
+#          Connection pooling is very, very strongly recommended.
+#
+# async -> Nonblocking server implementation with one thread to serve
+#          rpc connections.  This is not recommended for high throughput use
+#          cases. Async has been tested to be about 50% slower than sync
+#          or hsha and is deprecated: it will be removed in the next major release.
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." The rpc thread pool
+#          (see below) is used to manage requests, but the threads are multiplexed
+#          across the different clients.
+#
+# The default is sync because on Windows hsha is about 30% slower.  On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max|thread to set request pool size.
+# You would primarily set max for the sync server to safeguard against
+# misbehaved clients; if you do hit the max, Cassandra will block until one
+# disconnects before accepting more.  The defaults for sync are min of 16 and max
+# unlimited.
+#
+# For the Hsha server, the min and max both default to the number of CPU cores.
+#
+# This configuration is ignored by the async server.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+# 0 disables TFramedTransport in favor of TSocket. This option
+# is deprecated; we strongly recommend using Framed mode.
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data.  Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction.  Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you.  Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns.  The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory.  Larger rows will spill
+# over to disk and use a slower two-pass compaction process.  A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair.  Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this account for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable.  Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# Time to wait for a reply from other nodes before failing the command
+rpc_timeout_in_ms: 10000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch, which will let Cassandra know enough
+# about your network topology to route requests efficiently.
+# Out of the box, Cassandra provides
+#  - org.apache.cassandra.locator.SimpleSnitch:
+#    Treats Strategy order as proximity. This improves cache locality
+#    when disabling read repair, which can further improve throughput.
+#  - org.apache.cassandra.locator.RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's
+#    IP address, respectively
+# org.apache.cassandra.locator.PropertyFileSnitch:
+#  - Proximity is determined by rack and data center, which are
+#    explicitly configured in cassandra-topology.properties.
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage.  Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+#  - throttle_limit -- The throttle_limit is the number of in-flight
+#                      requests per client.  Requests beyond
+#                      that limit are queued up until
+#                      running requests can complete.
+#                      The value of 80 here is twice the number of
+#                      concurrent_reads + concurrent_writes.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifer based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primrary
+# row index in terms of space versus time.  The larger the interval,
+# the smaller and less effective the sampling will be.  In technicial
+# terms, the interval coresponds to the number of index entries that
+# are skipped between taking each sample.  All the sampled entries
+# must fit in memory.  Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs.  This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without a impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore.  For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra