cassandra 0.9.1 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. data/CHANGELOG +12 -0
  2. data/Manifest +31 -12
  3. data/README.rdoc +3 -2
  4. data/Rakefile +53 -23
  5. data/cassandra.gemspec +6 -8
  6. data/conf/{cassandra.in.sh → 0.6/cassandra.in.sh} +0 -0
  7. data/conf/{log4j.properties → 0.6/log4j.properties} +0 -0
  8. data/conf/0.6/schema.json +48 -0
  9. data/conf/{storage-conf.xml → 0.6/storage-conf.xml} +5 -5
  10. data/conf/0.7/cassandra.in.sh +46 -0
  11. data/conf/0.7/cassandra.yaml +336 -0
  12. data/conf/0.7/log4j-server.properties +41 -0
  13. data/conf/0.7/schema.json +48 -0
  14. data/conf/0.7/schema.txt +56 -0
  15. data/conf/0.8/cassandra.in.sh +41 -0
  16. data/conf/0.8/cassandra.yaml +61 -0
  17. data/conf/0.8/log4j-server.properties +40 -0
  18. data/conf/0.8/schema.json +48 -0
  19. data/conf/0.8/schema.txt +56 -0
  20. data/lib/cassandra.rb +1 -1
  21. data/lib/cassandra/0.6/cassandra.rb +1 -0
  22. data/lib/cassandra/0.6/columns.rb +5 -7
  23. data/lib/cassandra/0.6/protocol.rb +1 -1
  24. data/lib/cassandra/0.7/cassandra.rb +5 -5
  25. data/lib/cassandra/0.7/columns.rb +5 -6
  26. data/lib/cassandra/0.7/protocol.rb +12 -3
  27. data/lib/cassandra/0.8.rb +7 -0
  28. data/lib/cassandra/0.8/cassandra.rb +272 -0
  29. data/lib/cassandra/0.8/column_family.rb +3 -0
  30. data/lib/cassandra/0.8/columns.rb +84 -0
  31. data/lib/cassandra/0.8/keyspace.rb +3 -0
  32. data/lib/cassandra/0.8/protocol.rb +120 -0
  33. data/lib/cassandra/cassandra.rb +6 -11
  34. data/lib/cassandra/helpers.rb +1 -0
  35. data/lib/cassandra/mock.rb +107 -64
  36. data/lib/cassandra/ordered_hash.rb +1 -6
  37. data/test/cassandra_mock_test.rb +7 -27
  38. data/test/cassandra_test.rb +41 -15
  39. data/test/eventmachine_test.rb +30 -30
  40. data/test/test_helper.rb +2 -1
  41. data/vendor/0.8/gen-rb/cassandra.rb +2215 -0
  42. data/vendor/0.8/gen-rb/cassandra_constants.rb +12 -0
  43. data/vendor/0.8/gen-rb/cassandra_types.rb +814 -0
  44. metadata +50 -27
  45. data/conf/cassandra.yaml +0 -113
data/CHANGELOG CHANGED
@@ -1,3 +1,15 @@
1
+ v0.10.0 Major Update (rjackson)
2
+ - Update Rakefile to install 0.6.13, 0.7.4, 0.8.0-beta1 to ~/cassandra/cassandra-VERSION
3
+ - Add data:load task to Rakefile for creating the schema required for the tests
4
+ - Default the Rakefile to use 0.8-beta1
5
+ - Setup test suite to work on 0.6.13, 0.7.4, and 0.8.0-beta1
6
+ - All tests pass for all supported (0.6.13, 0.7.4, 0.8.0-beta1) versions.
7
+ - Added Support for 0.8-beta1
8
+ - Changed get_index_slices to return a hash of rows
9
+ - Updated Cassandra::Mock to pass all tests for each Cassandra version
10
+
11
+ v0.9.2 fix bug with deletions in batch mutations
12
+
1
13
  v0.9.1 Support for secondary indexing. (jhermes)
2
14
  Fix bug in mock where we didn't support range queries. (therealadam)
3
15
  Support deletes in batch mutations. [blanquer]
data/Manifest CHANGED
@@ -1,24 +1,35 @@
1
- CHANGELOG
2
- LICENSE
3
- Manifest
4
- README.rdoc
5
- Rakefile
6
1
  bin/cassandra_helper
7
- conf/cassandra.in.sh
8
- conf/cassandra.yaml
9
- conf/log4j.properties
10
- conf/storage-conf.xml
11
- lib/cassandra.rb
12
- lib/cassandra/0.6.rb
2
+ CHANGELOG
3
+ conf/0.6/cassandra.in.sh
4
+ conf/0.6/log4j.properties
5
+ conf/0.6/schema.json
6
+ conf/0.6/storage-conf.xml
7
+ conf/0.7/cassandra.in.sh
8
+ conf/0.7/cassandra.yaml
9
+ conf/0.7/log4j-server.properties
10
+ conf/0.7/schema.json
11
+ conf/0.7/schema.txt
12
+ conf/0.8/cassandra.in.sh
13
+ conf/0.8/cassandra.yaml
14
+ conf/0.8/log4j-server.properties
15
+ conf/0.8/schema.json
16
+ conf/0.8/schema.txt
13
17
  lib/cassandra/0.6/cassandra.rb
14
18
  lib/cassandra/0.6/columns.rb
15
19
  lib/cassandra/0.6/protocol.rb
16
- lib/cassandra/0.7.rb
20
+ lib/cassandra/0.6.rb
17
21
  lib/cassandra/0.7/cassandra.rb
18
22
  lib/cassandra/0.7/column_family.rb
19
23
  lib/cassandra/0.7/columns.rb
20
24
  lib/cassandra/0.7/keyspace.rb
21
25
  lib/cassandra/0.7/protocol.rb
26
+ lib/cassandra/0.7.rb
27
+ lib/cassandra/0.8/cassandra.rb
28
+ lib/cassandra/0.8/column_family.rb
29
+ lib/cassandra/0.8/columns.rb
30
+ lib/cassandra/0.8/keyspace.rb
31
+ lib/cassandra/0.8/protocol.rb
32
+ lib/cassandra/0.8.rb
22
33
  lib/cassandra/array.rb
23
34
  lib/cassandra/cassandra.rb
24
35
  lib/cassandra/columns.rb
@@ -30,6 +41,11 @@ lib/cassandra/long.rb
30
41
  lib/cassandra/mock.rb
31
42
  lib/cassandra/ordered_hash.rb
32
43
  lib/cassandra/time.rb
44
+ lib/cassandra.rb
45
+ LICENSE
46
+ Manifest
47
+ Rakefile
48
+ README.rdoc
33
49
  test/cassandra_client_test.rb
34
50
  test/cassandra_mock_test.rb
35
51
  test/cassandra_test.rb
@@ -43,3 +59,6 @@ vendor/0.6/gen-rb/cassandra_types.rb
43
59
  vendor/0.7/gen-rb/cassandra.rb
44
60
  vendor/0.7/gen-rb/cassandra_constants.rb
45
61
  vendor/0.7/gen-rb/cassandra_types.rb
62
+ vendor/0.8/gen-rb/cassandra.rb
63
+ vendor/0.8/gen-rb/cassandra_constants.rb
64
+ vendor/0.8/gen-rb/cassandra_types.rb
data/README.rdoc CHANGED
@@ -22,9 +22,10 @@ You need Ruby 1.8 or 1.9. If you have those, just run:
22
22
 
23
23
  Cassandra itself is a rapidly moving target. In order to get a working server, use the `bin/cassandra_helper` script:
24
24
 
25
- cassandra_helper cassandra
25
+ CASSANDRA_VERSION=0.8 cassandra_helper cassandra
26
26
 
27
- A server will be installed in `$HOME/cassandra/server`, and started in debug mode.
27
+ Specify the Cassandra version by setting the CASSANDRA_VERSION
28
+ environment variable. A server will be installed in `$HOME/cassandra/cassandra-VERSION`, and started in debug mode.
28
29
 
29
30
  WARNING: Don't use the test folder for your data, as it will get overwritten when you update the gem.
30
31
 
data/Rakefile CHANGED
@@ -1,3 +1,5 @@
1
+ require 'fileutils'
2
+
1
3
  unless ENV['FROM_BIN_CASSANDRA_HELPER']
2
4
  require 'rubygems'
3
5
  require 'echoe'
@@ -13,44 +15,55 @@ unless ENV['FROM_BIN_CASSANDRA_HELPER']
13
15
  end
14
16
  end
15
17
 
18
+ CassandraBinaries = {
19
+ '0.6' => 'http://www.apache.org/dist/cassandra/0.6.13/apache-cassandra-0.6.13-bin.tar.gz',
20
+ '0.7' => 'http://www.apache.org/dist/cassandra/0.7.5/apache-cassandra-0.7.5-bin.tar.gz',
21
+ '0.8' => 'http://www.apache.org/dist/cassandra/0.8.0/apache-cassandra-0.8.0-beta1-bin.tar.gz'
22
+ }
23
+
16
24
  CASSANDRA_HOME = ENV['CASSANDRA_HOME'] || "#{ENV['HOME']}/cassandra"
17
- DOWNLOAD_DIR = "/tmp"
18
- DIST_URL = "http://www.fightrice.com/mirrors/apache/cassandra/0.6.12/apache-cassandra-0.6.12-bin.tar.gz"
19
- DIST_FILE = DIST_URL.split('/').last
25
+ CASSANDRA_VERSION = ENV['CASSANDRA_VERSION'] || '0.7'
26
+
27
+ def setup_cassandra_version(version = CASSANDRA_VERSION)
28
+ FileUtils.mkdir_p CASSANDRA_HOME
29
+
30
+ destination_directory = File.join(CASSANDRA_HOME, 'cassandra-' + CASSANDRA_VERSION)
31
+
32
+ unless File.exists?(File.join(destination_directory, 'bin','cassandra'))
33
+ download_source = CassandraBinaries[CASSANDRA_VERSION]
34
+ download_destination = File.join("/tmp", File.basename(download_source))
35
+ untar_directory = File.join(CASSANDRA_HOME, File.basename(download_source,'-bin.tar.gz'))
20
36
 
21
- directory CASSANDRA_HOME
22
- directory File.join(CASSANDRA_HOME, 'test', 'data')
37
+ puts "downloading cassandra"
38
+ sh "curl -L -o #{download_destination} #{download_source}"
39
+
40
+ sh "tar xzf #{download_destination} -C #{CASSANDRA_HOME}"
41
+ sh "mv #{untar_directory} #{destination_directory}"
42
+ end
43
+ end
23
44
 
24
45
  desc "Start Cassandra"
25
- task :cassandra => [:java, File.join(CASSANDRA_HOME, 'server'), File.join(CASSANDRA_HOME, 'test', 'data')] do
46
+ task :cassandra => :java do
47
+ setup_cassandra_version
48
+
26
49
  env = ""
27
50
  if !ENV["CASSANDRA_INCLUDE"]
28
- env << "CASSANDRA_INCLUDE=#{File.expand_path(Dir.pwd)}/conf/cassandra.in.sh "
29
- env << "CASSANDRA_HOME=#{CASSANDRA_HOME}/server "
30
- env << "CASSANDRA_CONF=#{File.expand_path(Dir.pwd)}/conf"
51
+ env << "CASSANDRA_INCLUDE=#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}/cassandra.in.sh "
52
+ env << "CASSANDRA_HOME=#{CASSANDRA_HOME}/cassandra-#{CASSANDRA_VERSION} "
53
+ env << "CASSANDRA_CONF=#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}"
31
54
  else
32
55
  env << "CASSANDRA_INCLUDE=#{ENV['CASSANDRA_INCLUDE']} "
33
56
  env << "CASSANDRA_HOME=#{ENV['CASSANDRA_HOME']} "
34
57
  env << "CASSANDRA_CONF=#{ENV['CASSANDRA_CONF']}"
35
58
  end
36
59
 
37
- Dir.chdir(File.join(CASSANDRA_HOME, 'server')) do
38
- sh("env #{env} bin/cassandra -f")
39
- end
40
- end
60
+ puts env
41
61
 
42
- file File.join(CASSANDRA_HOME, 'server') => File.join(DOWNLOAD_DIR, DIST_FILE) do
43
- Dir.chdir(CASSANDRA_HOME) do
44
- sh "tar xzf #{File.join(DOWNLOAD_DIR, DIST_FILE)} -C #{CASSANDRA_HOME}"
45
- sh "mv #{DIST_FILE.split('.')[0..2].join('.').sub('-bin', '')} server"
62
+ Dir.chdir(File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")) do
63
+ sh("env #{env} bin/cassandra -f")
46
64
  end
47
65
  end
48
66
 
49
- file File.join(DOWNLOAD_DIR, DIST_FILE) => CASSANDRA_HOME do
50
- puts "downloading"
51
- cmd = "curl -L -o #{File.join(DOWNLOAD_DIR, DIST_FILE)} #{DIST_URL}"
52
- sh cmd
53
- end
54
67
 
55
68
  desc "Check Java version"
56
69
  task :java do
@@ -67,10 +80,27 @@ namespace :data do
67
80
  desc "Reset test data"
68
81
  task :reset do
69
82
  puts "Resetting test data"
70
- sh("rm -rf #{File.join(CASSANDRA_HOME, 'server', 'data')}")
83
+ sh("rm -rf #{File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}", 'data')}")
84
+ end
85
+
86
+ desc "Load test data structures."
87
+ task :load do
88
+ return true if CASSANDRA_VERSION == '0.6'
89
+
90
+ schema_path = "#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}/schema.txt"
91
+ puts "Loading test data structures."
92
+ Dir.chdir(File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")) do
93
+ begin
94
+ sh("bin/cassandra-cli --host localhost --batch < #{schema_path}")
95
+ rescue
96
+ puts "Schema already loaded."
97
+ end
98
+ end
71
99
  end
72
100
  end
73
101
 
102
+ task :test => 'data:load'
103
+
74
104
  # desc "Regenerate thrift bindings for Cassandra" # Dev only
75
105
  task :thrift do
76
106
  puts "Generating Thrift bindings"
data/cassandra.gemspec CHANGED
@@ -2,27 +2,25 @@
2
2
 
3
3
  Gem::Specification.new do |s|
4
4
  s.name = %q{cassandra}
5
- s.version = "0.9.1"
5
+ s.version = "0.10.0"
6
6
 
7
7
  s.required_rubygems_version = Gem::Requirement.new(">= 0.8") if s.respond_to? :required_rubygems_version=
8
8
  s.authors = ["Evan Weaver, Ryan King"]
9
- s.date = %q{2011-04-06}
10
- s.default_executable = %q{cassandra_helper}
9
+ s.date = %q{2011-04-28}
11
10
  s.description = %q{A Ruby client for the Cassandra distributed database.}
12
11
  s.email = %q{}
13
12
  s.executables = ["cassandra_helper"]
14
- s.extra_rdoc_files = ["CHANGELOG", "LICENSE", "README.rdoc", "bin/cassandra_helper", "lib/cassandra.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/column_family.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/keyspace.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/array.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/helpers.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/time.rb"]
15
- s.files = ["CHANGELOG", "LICENSE", "Manifest", "README.rdoc", "Rakefile", "bin/cassandra_helper", "conf/cassandra.in.sh", "conf/cassandra.yaml", "conf/log4j.properties", "conf/storage-conf.xml", "lib/cassandra.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/column_family.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/keyspace.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/array.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/helpers.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/time.rb", "test/cassandra_client_test.rb", "test/cassandra_mock_test.rb", "test/cassandra_test.rb", "test/comparable_types_test.rb", "test/eventmachine_test.rb", "test/ordered_hash_test.rb", "test/test_helper.rb", "vendor/0.6/gen-rb/cassandra.rb", "vendor/0.6/gen-rb/cassandra_constants.rb", "vendor/0.6/gen-rb/cassandra_types.rb", "vendor/0.7/gen-rb/cassandra.rb", "vendor/0.7/gen-rb/cassandra_constants.rb", "vendor/0.7/gen-rb/cassandra_types.rb", "cassandra.gemspec"]
16
- s.homepage = %q{}
13
+ s.extra_rdoc_files = ["bin/cassandra_helper", "CHANGELOG", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/column_family.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/keyspace.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.8/cassandra.rb", "lib/cassandra/0.8/column_family.rb", "lib/cassandra/0.8/columns.rb", "lib/cassandra/0.8/keyspace.rb", "lib/cassandra/0.8/protocol.rb", "lib/cassandra/0.8.rb", "lib/cassandra/array.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/helpers.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/time.rb", "lib/cassandra.rb", "LICENSE", "README.rdoc"]
14
+ s.files = ["bin/cassandra_helper", "CHANGELOG", "conf/0.6/cassandra.in.sh", "conf/0.6/log4j.properties", "conf/0.6/schema.json", "conf/0.6/storage-conf.xml", "conf/0.7/cassandra.in.sh", "conf/0.7/cassandra.yaml", "conf/0.7/log4j-server.properties", "conf/0.7/schema.json", "conf/0.7/schema.txt", "conf/0.8/cassandra.in.sh", "conf/0.8/cassandra.yaml", "conf/0.8/log4j-server.properties", "conf/0.8/schema.json", "conf/0.8/schema.txt", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/column_family.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/keyspace.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.8/cassandra.rb", "lib/cassandra/0.8/column_family.rb", "lib/cassandra/0.8/columns.rb", "lib/cassandra/0.8/keyspace.rb", "lib/cassandra/0.8/protocol.rb", "lib/cassandra/0.8.rb", "lib/cassandra/array.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/helpers.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/time.rb", "lib/cassandra.rb", "LICENSE", "Manifest", "Rakefile", "README.rdoc", "test/cassandra_client_test.rb", "test/cassandra_mock_test.rb", "test/cassandra_test.rb", "test/comparable_types_test.rb", "test/eventmachine_test.rb", "test/ordered_hash_test.rb", "test/test_helper.rb", "vendor/0.6/gen-rb/cassandra.rb", "vendor/0.6/gen-rb/cassandra_constants.rb", "vendor/0.6/gen-rb/cassandra_types.rb", "vendor/0.7/gen-rb/cassandra.rb", "vendor/0.7/gen-rb/cassandra_constants.rb", "vendor/0.7/gen-rb/cassandra_types.rb", "vendor/0.8/gen-rb/cassandra.rb", "vendor/0.8/gen-rb/cassandra_constants.rb", "vendor/0.8/gen-rb/cassandra_types.rb", "cassandra.gemspec"]
15
+ s.homepage = %q{http://fauna.github.com/fauna/cassandra/}
17
16
  s.rdoc_options = ["--line-numbers", "--inline-source", "--title", "Cassandra", "--main", "README.rdoc"]
18
17
  s.require_paths = ["lib"]
19
18
  s.rubyforge_project = %q{fauna}
20
- s.rubygems_version = %q{1.3.7}
19
+ s.rubygems_version = %q{1.7.2}
21
20
  s.summary = %q{A Ruby client for the Cassandra distributed database.}
22
21
  s.test_files = ["test/cassandra_client_test.rb", "test/cassandra_mock_test.rb", "test/cassandra_test.rb", "test/comparable_types_test.rb", "test/eventmachine_test.rb", "test/ordered_hash_test.rb", "test/test_helper.rb"]
23
22
 
24
23
  if s.respond_to? :specification_version then
25
- current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
26
24
  s.specification_version = 3
27
25
 
28
26
  if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
File without changes
File without changes
@@ -0,0 +1,48 @@
1
+ {"Twitter":{
2
+ "Users":{
3
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
4
+ "Type":"Standard"},
5
+ "UserAudits":{
6
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
7
+ "Type":"Standard"},
8
+ "UserRelationships":{
9
+ "CompareSubcolumnsWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
10
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
11
+ "Type":"Super"},
12
+ "Usernames":{
13
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
14
+ "Type":"Standard"},
15
+ "Statuses":{
16
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
17
+ "Type":"Standard"},
18
+ "StatusAudits":{
19
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
20
+ "Type":"Standard"},
21
+ "StatusRelationships":{
22
+ "CompareSubcolumnsWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
23
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
24
+ "Type":"Super"},
25
+ "Index":{
26
+ "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
27
+ "Type":"Super"},
28
+ "TimelinishThings":{
29
+ "CompareWith":"org.apache.cassandra.db.marshal.BytesType",
30
+ "Type":"Standard"}
31
+ },
32
+ "Multiblog":{
33
+ "Blogs":{
34
+ "CompareWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
35
+ "Type":"Standard"},
36
+ "Comments":{
37
+ "CompareWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
38
+ "Type":"Standard"}
39
+ },
40
+ "MultiblogLong":{
41
+ "Blogs":{
42
+ "CompareWith":"org.apache.cassandra.db.marshal.LongType",
43
+ "Type":"Standard"},
44
+ "Comments":{
45
+ "CompareWith":"org.apache.cassandra.db.marshal.LongType",
46
+ "Type":"Standard"}
47
+ }
48
+ }
@@ -156,13 +156,13 @@
156
156
  ~ disk. Keep the data disks and the CommitLog disks separate for best
157
157
  ~ performance
158
158
  -->
159
- <CommitLogDirectory>data/cassandra/commitlog</CommitLogDirectory>
159
+ <CommitLogDirectory>data/commitlog</CommitLogDirectory>
160
160
  <DataFileDirectories>
161
- <DataFileDirectory>data/cassandra/data</DataFileDirectory>
161
+ <DataFileDirectory>data/data</DataFileDirectory>
162
162
  </DataFileDirectories>
163
- <CalloutLocation>data/cassandra/callouts</CalloutLocation>
164
- <StagingFileDirectory>data/cassandra/staging</StagingFileDirectory>
165
- <SavedCachesDirectory>data/cassandra/saved_caches</SavedCachesDirectory>
163
+ <CalloutLocation>data/callouts</CalloutLocation>
164
+ <StagingFileDirectory>data/staging</StagingFileDirectory>
165
+ <SavedCachesDirectory>data/saved_caches</SavedCachesDirectory>
166
166
 
167
167
 
168
168
  <!--
@@ -0,0 +1,46 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ if [ "x$CASSANDRA_HOME" = "x" ]; then
18
+ CASSANDRA_HOME=`dirname $0`/..
19
+ fi
20
+
21
+ # The directory where Cassandra's configs live (required)
22
+ if [ "x$CASSANDRA_CONF" = "x" ]; then
23
+ CASSANDRA_CONF=$CASSANDRA_HOME/conf
24
+ fi
25
+
26
+ # This can be the path to a jar file, or a directory containing the
27
+ # compiled classes. NOTE: This isn't needed by the startup script,
28
+ # it's just used here in constructing the classpath.
29
+ cassandra_bin=$CASSANDRA_HOME/build/classes
30
+ #cassandra_bin=$cassandra_home/build/cassandra.jar
31
+
32
+ # JAVA_HOME can optionally be set here
33
+ #JAVA_HOME=/usr/local/jdk6
34
+
35
+ # The java classpath (required)
36
+ CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
37
+
38
+ for jar in $CASSANDRA_HOME/lib/*.jar; do
39
+ CLASSPATH=$CLASSPATH:$jar
40
+ done
41
+
42
+ # Arguments to pass to the JVM
43
+ JVM_OPTS=" \
44
+ -ea \
45
+ -Xms128M \
46
+ -Xmx1G"
@@ -0,0 +1,336 @@
1
+ # Cassandra storage config YAML
2
+
3
+ # NOTE:
4
+ # See http://wiki.apache.org/cassandra/StorageConfiguration for
5
+ # full explanations of configuration directives
6
+ # /NOTE
7
+
8
+ # The name of the cluster. This is mainly used to prevent machines in
9
+ # one logical cluster from joining another.
10
+ cluster_name: 'Test Cluster'
11
+
12
+ # You should always specify InitialToken when setting up a production
13
+ # cluster for the first time, and often when adding capacity later.
14
+ # The principle is that each node should be given an equal slice of
15
+ # the token ring; see http://wiki.apache.org/cassandra/Operations
16
+ # for more details.
17
+ #
18
+ # If blank, Cassandra will request a token bisecting the range of
19
+ # the heaviest-loaded existing node. If there is no load information
20
+ # available, such as is the case with a new cluster, it will pick
21
+ # a random token, which will lead to hot spots.
22
+ initial_token:
23
+
24
+ # Set to true to make new [non-seed] nodes automatically migrate data
25
+ # to themselves from the pre-existing nodes in the cluster. Defaults
26
+ # to false because you can only bootstrap N machines at a time from
27
+ # an existing cluster of N, so if you are bringing up a cluster of
28
+ # 10 machines with 3 seeds you would have to do it in stages. Leaving
29
+ # this off for the initial start simplifies that.
30
+ auto_bootstrap: false
31
+
32
+ # See http://wiki.apache.org/cassandra/HintedHandoff
33
+ hinted_handoff_enabled: true
34
+ # this defines the maximum amount of time a dead host will have hints
35
+ # generated. After it has been dead this long, hints will be dropped.
36
+ max_hint_window_in_ms: 3600000 # one hour
37
+ # Sleep this long after delivering each row or row fragment
38
+ hinted_handoff_throttle_delay_in_ms: 50
39
+
40
+ # authentication backend, implementing IAuthenticator; used to identify users
41
+ authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
42
+
43
+ # authorization backend, implementing IAuthority; used to limit access/provide permissions
44
+ authority: org.apache.cassandra.auth.AllowAllAuthority
45
+
46
+ # The partitioner is responsible for distributing rows (by key) across
47
+ # nodes in the cluster. Any IPartitioner may be used, including your
48
+ # own as long as it is on the classpath. Out of the box, Cassandra
49
+ # provides org.apache.cassandra.dht.RandomPartitioner
50
+ # org.apache.cassandra.dht.ByteOrderedPartitioner,
51
+ # org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
52
+ # and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
53
+ # (deprecated).
54
+ #
55
+ # - RandomPartitioner distributes rows across the cluster evenly by md5.
56
+ # When in doubt, this is the best option.
57
+ # - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
58
+ # scanning rows in key order, but the ordering can generate hot spots
59
+ # for sequential insertion workloads.
60
+ # - OrderPreservingPartitioner is an obsolete form of BOP, that stores
61
+ # - keys in a less-efficient format and only works with keys that are
62
+ # UTF8-encoded Strings.
63
+ # - CollatingOPP colates according to EN,US rules rather than lexical byte
64
+ # ordering. Use this as an example if you need custom collation.
65
+ #
66
+ # See http://wiki.apache.org/cassandra/Operations for more on
67
+ # partitioners and token selection.
68
+ partitioner: org.apache.cassandra.dht.RandomPartitioner
69
+
70
+ # directories where Cassandra should store data on disk.
71
+ data_file_directories:
72
+ - data/data
73
+
74
+ # commit log
75
+ commitlog_directory: data/commitlog
76
+
77
+ # saved caches
78
+ saved_caches_directory: data/saved_caches
79
+
80
+ # Size to allow commitlog to grow to before creating a new segment
81
+ commitlog_rotation_threshold_in_mb: 128
82
+
83
+ # commitlog_sync may be either "periodic" or "batch."
84
+ # When in batch mode, Cassandra won't ack writes until the commit log
85
+ # has been fsynced to disk. It will wait up to
86
+ # CommitLogSyncBatchWindowInMS milliseconds for other writes, before
87
+ # performing the sync.
88
+ commitlog_sync: periodic
89
+
90
+ # the other option is "periodic" where writes may be acked immediately
91
+ # and the CommitLog is simply synced every commitlog_sync_period_in_ms
92
+ # milliseconds.
93
+ commitlog_sync_period_in_ms: 10000
94
+
95
+ # emergency pressure valve: each time heap usage after a full (CMS)
96
+ # garbage collection is above this fraction of the max, Cassandra will
97
+ # flush the largest memtables.
98
+ #
99
+ # Set to 1.0 to disable. Setting this lower than
100
+ # CMSInitiatingOccupancyFraction is not likely to be useful.
101
+ #
102
+ # RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
103
+ # it is most effective under light to moderate load, or read-heavy
104
+ # workloads; under truly massive write load, it will often be too
105
+ # little, too late.
106
+ flush_largest_memtables_at: 0.75
107
+
108
+ # emergency pressure valve #2: the first time heap usage after a full
109
+ # (CMS) garbage collection is above this fraction of the max,
110
+ # Cassandra will reduce cache maximum _capacity_ to the given fraction
111
+ # of the current _size_. Should usually be set substantially above
112
+ # flush_largest_memtables_at, since that will have less long-term
113
+ # impact on the system.
114
+ #
115
+ # Set to 1.0 to disable. Setting this lower than
116
+ # CMSInitiatingOccupancyFraction is not likely to be useful.
117
+ reduce_cache_sizes_at: 0.85
118
+ reduce_cache_capacity_to: 0.6
119
+
120
+ # Addresses of hosts that are deemed contact points.
121
+ # Cassandra nodes use this list of hosts to find each other and learn
122
+ # the topology of the ring. You must change this if you are running
123
+ # multiple nodes!
124
+ seeds:
125
+ - 127.0.0.1
126
+
127
+ # Access mode. mmapped i/o is substantially faster, but only practical on
128
+ # a 64bit machine (which notably does not include EC2 "small" instances)
129
+ # or relatively small datasets. "auto", the safe choice, will enable
130
+ # mmapping on a 64bit JVM. Other values are "mmap", "mmap_index_only"
131
+ # (which may allow you to get part of the benefits of mmap on a 32bit
132
+ # machine by mmapping only index files) and "standard".
133
+ # (The buffer size settings that follow only apply to standard,
134
+ # non-mmapped i/o.)
135
+ disk_access_mode: auto
136
+
137
+ # For workloads with more data than can fit in memory, Cassandra's
138
+ # bottleneck will be reads that need to fetch data from
139
+ # disk. "concurrent_reads" should be set to (16 * number_of_drives) in
140
+ # order to allow the operations to enqueue low enough in the stack
141
+ # that the OS and drives can reorder them.
142
+ #
143
+ # On the other hand, since writes are almost never IO bound, the ideal
144
+ # number of "concurrent_writes" is dependent on the number of cores in
145
+ # your system; (8 * number_of_cores) is a good rule of thumb.
146
+ concurrent_reads: 32
147
+ concurrent_writes: 32
148
+
149
+ # This sets the amount of memtable flush writer threads. These will
150
+ # be blocked by disk io, and each one will hold a memtable in memory
151
+ # while blocked. If you have a large heap and many data directories,
152
+ # you can increase this value for better flush performance.
153
+ # By default this will be set to the amount of data directories defined.
154
+ #memtable_flush_writers: 1
155
+
156
+ # Buffer size to use when performing contiguous column slices.
157
+ # Increase this to the size of the column slices you typically perform
158
+ sliced_buffer_size_in_kb: 64
159
+
160
+ # TCP port, for commands and data
161
+ storage_port: 7000
162
+
163
+ # Address to bind to and tell other Cassandra nodes to connect to. You
164
+ # _must_ change this if you want multiple nodes to be able to
165
+ # communicate!
166
+ #
167
+ # Leaving it blank leaves it up to InetAddress.getLocalHost(). This
168
+ # will always do the Right Thing *if* the node is properly configured
169
+ # (hostname, name resolution, etc), and the Right Thing is to use the
170
+ # address associated with the hostname (it might not be).
171
+ #
172
+ # Setting this to 0.0.0.0 is always wrong.
173
+ listen_address: localhost
174
+
175
+ # The address to bind the Thrift RPC service to -- clients connect
176
+ # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
177
+ # you want Thrift to listen on all interfaces.
178
+ #
179
+ # Leaving this blank has the same effect it does for ListenAddress,
180
+ # (i.e. it will be based on the configured hostname of the node).
181
+ rpc_address: localhost
182
+ # port for Thrift to listen for clients on
183
+ rpc_port: 9160
184
+
185
+ # enable or disable keepalive on rpc connections
186
+ rpc_keepalive: true
187
+
188
+ # Cassandra uses thread-per-client for client RPC. This can
189
+ # be expensive in memory used for thread stack for a large
190
+ # enough number of clients. (Hence, connection pooling is
191
+ # very, very strongly recommended.)
192
+ #
193
+ # Uncomment rpc_min|max|thread to set request pool size.
194
+ # You would primarily set max as a safeguard against misbehaved
195
+ # clients; if you do hit the max, Cassandra will block until
196
+ # one disconnects before accepting more. The defaults are
197
+ # min of 16 and max unlimited.
198
+ #
199
+ # rpc_min_threads: 16
200
+ # rpc_max_threads: 2048
201
+
202
+ # uncomment to set socket buffer sizes on rpc connections
203
+ # rpc_send_buff_size_in_bytes:
204
+ # rpc_recv_buff_size_in_bytes:
205
+
206
+ # Frame size for thrift (maximum field length).
207
+ # 0 disables TFramedTransport in favor of TSocket. This option
208
+ # is deprecated; we strongly recommend using Framed mode.
209
+ thrift_framed_transport_size_in_mb: 15
210
+
211
+ # The max length of a thrift message, including all fields and
212
+ # internal thrift overhead.
213
+ thrift_max_message_length_in_mb: 16
214
+
215
+ # Set to true to have Cassandra create a hard link to each sstable
216
+ # flushed or streamed locally in a backups/ subdirectory of the
217
+ # Keyspace data. Removing these links is the operator's
218
+ # responsibility.
219
+ incremental_backups: false
220
+
221
+ # Whether or not to take a snapshot before each compaction. Be
222
+ # careful using this option, since Cassandra won't clean up the
223
+ # snapshots for you. Mostly useful if you're paranoid when there
224
+ # is a data format change.
225
+ snapshot_before_compaction: false
226
+
227
+ # change this to increase the compaction thread's priority. In java, 1 is the
228
+ # lowest priority and that is our default.
229
+ # compaction_thread_priority: 1
230
+
231
+ # Add column indexes to a row after its contents reach this size.
232
+ # Increase if your column values are large, or if you have a very large
233
+ # number of columns. The competing causes are, Cassandra has to
234
+ # deserialize this much of the row to read a single column, so you want
235
+ # it to be small - at least if you do many partial-row reads - but all
236
+ # the index data is read for each access, so you don't want to generate
237
+ # that wastefully either.
238
+ column_index_size_in_kb: 64
239
+
240
+ # Size limit for rows being compacted in memory. Larger rows will spill
241
+ # over to disk and use a slower two-pass compaction process. A message
242
+ # will be logged specifying the row key.
243
+ in_memory_compaction_limit_in_mb: 64
244
+
245
+ # Track cached row keys during compaction, and re-cache their new
246
+ # positions in the compacted sstable. Disable if you use really large
247
+ # key caches.
248
+ compaction_preheat_key_cache: true
249
+
250
+ # Time to wait for a reply from other nodes before failing the command
251
+ rpc_timeout_in_ms: 10000
252
+
253
+ # phi value that must be reached for a host to be marked down.
254
+ # most users should never need to adjust this.
255
+ # phi_convict_threshold: 8
256
+
257
+ # endpoint_snitch -- Set this to a class that implements
258
+ # IEndpointSnitch, which will let Cassandra know enough
259
+ # about your network topology to route requests efficiently.
260
+ # Out of the box, Cassandra provides
261
+ # - org.apache.cassandra.locator.SimpleSnitch:
262
+ # Treats Strategy order as proximity. This improves cache locality
263
+ # when disabling read repair, which can further improve throughput.
264
+ # - org.apache.cassandra.locator.RackInferringSnitch:
265
+ # Proximity is determined by rack and data center, which are
266
+ # assumed to correspond to the 3rd and 2nd octet of each node's
267
+ # IP address, respectively
268
+ # - org.apache.cassandra.locator.PropertyFileSnitch:
269
+ # Proximity is determined by rack and data center, which are
270
+ # explicitly configured in cassandra-topology.properties.
271
+ endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
272
+
273
+ # dynamic_snitch -- This boolean controls whether the above snitch is
274
+ # wrapped with a dynamic snitch, which will monitor read latencies
275
+ # and avoid reading from hosts that have slowed (due to compaction,
276
+ # for instance)
277
+ dynamic_snitch: true
278
+ # controls how often to perform the more expensive part of host score
279
+ # calculation
280
+ dynamic_snitch_update_interval_in_ms: 100
281
+ # controls how often to reset all host scores, allowing a bad host to
282
+ # possibly recover
283
+ dynamic_snitch_reset_interval_in_ms: 600000
284
+ # if set greater than zero and read_repair_chance is < 1.0, this will allow
285
+ # 'pinning' of replicas to hosts in order to increase cache capacity.
286
+ # The badness threshold will control how much worse the pinned host has to be
287
+ # before the dynamic snitch will prefer other replicas over it. This is
288
+ # expressed as a double which represents a percentage. Thus, a value of
289
+ # 0.2 means Cassandra would continue to prefer the static snitch values
290
+ # until the pinned host was 20% worse than the fastest.
291
+ dynamic_snitch_badness_threshold: 0.0
292
+
293
+ # request_scheduler -- Set this to a class that implements
294
+ # RequestScheduler, which will schedule incoming client requests
295
+ # according to the specific policy. This is useful for multi-tenancy
296
+ # with a single Cassandra cluster.
297
+ # NOTE: This is specifically for requests from the client and does
298
+ # not affect inter node communication.
299
+ # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
300
+ # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
301
+ # client requests to a node with a separate queue for each
302
+ # request_scheduler_id. The scheduler is further customized by
303
+ # request_scheduler_options as described below.
304
+ request_scheduler: org.apache.cassandra.scheduler.NoScheduler
305
+
306
+ # Scheduler Options vary based on the type of scheduler
307
+ # NoScheduler - Has no options
308
+ # RoundRobin
309
+ # - throttle_limit -- The throttle_limit is the number of in-flight
310
+ # requests per client. Requests beyond
311
+ # that limit are queued up until
312
+ # running requests can complete.
313
+ # The value of 80 here is twice the number of
314
+ # concurrent_reads + concurrent_writes.
315
+ # - default_weight -- default_weight is optional and allows for
316
+ # overriding the default which is 1.
317
+ # - weights -- Weights are optional and will default to 1 or the
318
+ # overridden default_weight. The weight translates into how
319
+ # many requests are handled during each turn of the
320
+ # RoundRobin, based on the scheduler id.
321
+ #
322
+ # request_scheduler_options:
323
+ # throttle_limit: 80
324
+ # default_weight: 5
325
+ # weights:
326
+ # Keyspace1: 1
327
+ # Keyspace2: 5
328
+
329
+ # request_scheduler_id -- An identifier based on which to perform
330
+ # the request scheduling. Currently the only valid option is keyspace.
331
+ # request_scheduler_id: keyspace
332
+
333
+ # The Index Interval determines how large the sampling of row keys
334
+ # is for a given SSTable. The larger the sampling, the more effective
335
+ # the index is at the cost of space.
336
+ index_interval: 128