cassandra-model 0.1.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/Rakefile CHANGED
@@ -54,7 +54,7 @@ Rake::RDocTask.new do |rdoc|
54
54
  end
55
55
 
56
56
 
57
- CASSANDRA_HOME = ENV["CASSANDRA_HOME"] || "#{ENV["HOME"]}/apache-cassandra-0.6.0"
57
+ CASSANDRA_HOME = ENV["CASSANDRA_HOME"] || "#{ENV["HOME"]}/apache-cassandra"
58
58
  CASSANDRA_PID = ENV["CASSANDRA_PID"] || "/tmp/cassandra.pid".freeze
59
59
 
60
60
  cassandra_env = ""
data/VERSION CHANGED
@@ -1 +1 @@
1
- 0.1.1
1
+ 0.2.0
@@ -5,11 +5,11 @@
5
5
 
6
6
  Gem::Specification.new do |s|
7
7
  s.name = %q{cassandra-model}
8
- s.version = "0.1.1"
8
+ s.version = "0.2.0"
9
9
 
10
10
  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
11
11
  s.authors = ["Tien Le"]
12
- s.date = %q{2010-12-22}
12
+ s.date = %q{2011-01-23}
13
13
  s.description = %q{Cassandra-model allows you to map ColumnFamily/SuperColumnFamily in Cassandra to Ruby objects. It was designed to be fast and simple.}
14
14
  s.email = %q{tienlx /at/ gmail /dot/ com}
15
15
  s.extra_rdoc_files = [
@@ -33,8 +33,9 @@ Gem::Specification.new do |s|
33
33
  "test/callbacks_test.rb",
34
34
  "test/cassandra_model_test.rb",
35
35
  "test/config/cassandra.in.sh",
36
+ "test/config/cassandra.yaml",
37
+ "test/config/log4j-server.properties",
36
38
  "test/config/log4j-tools.properties",
37
- "test/config/log4j.properties",
38
39
  "test/config/storage-conf.xml",
39
40
  "test/test_helper.rb"
40
41
  ]
@@ -1,4 +1,4 @@
1
- require 'cassandra'
1
+ require 'cassandra/0.7'
2
2
  require 'forwardable'
3
3
  require 'date'
4
4
 
@@ -59,8 +59,8 @@ module CassandraModel
59
59
  end
60
60
  end
61
61
 
62
- attr_accessor :key, :new_record
63
- attr_reader :attributes, :errors
62
+ attr_accessor :key, :new_record, :errors
63
+ attr_reader :attributes
64
64
 
65
65
  def initialize(attrs = {}, convert = true)
66
66
  @new_record = true
@@ -114,6 +114,10 @@ module CassandraModel
114
114
  connection.remove(column_family, key, column,
115
115
  :consistency => @write_consistency_level || Cassandra::Consistency::QUORUM)
116
116
  end
117
+
118
+ def truncate!
119
+ connection.truncate!(column_family.to_s)
120
+ end
117
121
  end
118
122
  end
119
123
  end
@@ -80,5 +80,11 @@ class CassandraModelTest < Test::Unit::TestCase
80
80
  user.save
81
81
  assert_equal ["created_at", "full_name"], @connection.get(:Users, "abc").keys
82
82
  end
83
+
84
+ should "truncate the column family" do
85
+ assert !User.all.empty?
86
+ User.truncate!
87
+ assert User.all.empty?
88
+ end
83
89
  end
84
90
  end
@@ -0,0 +1,427 @@
1
+ # Cassandra storage config YAML
2
+
3
+ # NOTE:
4
+ # See http://wiki.apache.org/cassandra/StorageConfiguration for
5
+ # full explanations of configuration directives
6
+ # /NOTE
7
+
8
+ # The name of the cluster. This is mainly used to prevent machines in
9
+ # one logical cluster from joining another.
10
+ cluster_name: 'Test Cluster'
11
+
12
+ # You should always specify InitialToken when setting up a production
13
+ # cluster for the first time, and often when adding capacity later.
14
+ # The principle is that each node should be given an equal slice of
15
+ # the token ring; see http://wiki.apache.org/cassandra/Operations
16
+ # for more details.
17
+ #
18
+ # If blank, Cassandra will request a token bisecting the range of
19
+ # the heaviest-loaded existing node. If there is no load information
20
+ # available, such as is the case with a new cluster, it will pick
21
+ # a random token, which will lead to hot spots.
22
+ initial_token:
23
+
24
+ # Set to true to make new [non-seed] nodes automatically migrate data
25
+ # to themselves from the pre-existing nodes in the cluster. Defaults
26
+ # to false because you can only bootstrap N machines at a time from
27
+ # an existing cluster of N, so if you are bringing up a cluster of
28
+ # 10 machines with 3 seeds you would have to do it in stages. Leaving
29
+ # this off for the initial start simplifies that.
30
+ auto_bootstrap: false
31
+
32
+ # See http://wiki.apache.org/cassandra/HintedHandoff
33
+ hinted_handoff_enabled: true
34
+
35
+ # authentication backend, implementing IAuthenticator; used to identify users
36
+ authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
37
+
38
+ # authorization backend, implementing IAuthority; used to limit access/provide permissions
39
+ authority: org.apache.cassandra.auth.AllowAllAuthority
40
+
41
+ # The partitioner is responsible for distributing rows (by key) across
42
+ # nodes in the cluster. Any IPartitioner may be used, including your
43
+ # own as long as it is on the classpath. Out of the box, Cassandra
44
+ # provides org.apache.cassandra.dht.RandomPartitioner
45
+ # org.apache.cassandra.dht.ByteOrderedPartitioner,
46
+ # org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
47
+ # and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
48
+ # (deprecated).
49
+ #
50
+ # - RandomPartitioner distributes rows across the cluster evenly by md5.
51
+ # When in doubt, this is the best option.
52
+ # - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
53
+ # scanning rows in key order, but the ordering can generate hot spots
54
+ # for sequential insertion workloads.
55
+ # - OrderPreservingPartitioner is an obsolete form of BOP, that stores
56
+ # - keys in a less-efficient format and only works with keys that are
57
+ # UTF8-encoded Strings.
58
+ # - CollatingOPP collates according to EN,US rules rather than lexical byte
59
+ # ordering. Use this as an example if you need custom collation.
60
+ #
61
+ # See http://wiki.apache.org/cassandra/Operations for more on
62
+ # partitioners and token selection.
63
+ partitioner: org.apache.cassandra.dht.RandomPartitioner
64
+
65
+ # directories where Cassandra should store data on disk.
66
+ data_file_directories:
67
+ - /var/lib/cassandra/data
68
+
69
+ # commit log
70
+ commitlog_directory: /var/lib/cassandra/commitlog
71
+
72
+ # saved caches
73
+ saved_caches_directory: /var/lib/cassandra/saved_caches
74
+
75
+ # Size to allow commitlog to grow to before creating a new segment
76
+ commitlog_rotation_threshold_in_mb: 128
77
+
78
+ # commitlog_sync may be either "periodic" or "batch."
79
+ # When in batch mode, Cassandra won't ack writes until the commit log
80
+ # has been fsynced to disk. It will wait up to
81
+ # CommitLogSyncBatchWindowInMS milliseconds for other writes, before
82
+ # performing the sync.
83
+ commitlog_sync: periodic
84
+
85
+ # the other option is "timed," where writes may be acked immediately
86
+ # and the CommitLog is simply synced every commitlog_sync_period_in_ms
87
+ # milliseconds.
88
+ commitlog_sync_period_in_ms: 10000
89
+
90
+ # Addresses of hosts that are deemed contact points.
91
+ # Cassandra nodes use this list of hosts to find each other and learn
92
+ # the topology of the ring. You must change this if you are running
93
+ # multiple nodes!
94
+ seeds:
95
+ - 127.0.0.1
96
+
97
+ # Access mode. mmapped i/o is substantially faster, but only practical on
98
+ # a 64bit machine (which notably does not include EC2 "small" instances)
99
+ # or relatively small datasets. "auto", the safe choice, will enable
100
+ # mmapping on a 64bit JVM. Other values are "mmap", "mmap_index_only"
101
+ # (which may allow you to get part of the benefits of mmap on a 32bit
102
+ # machine by mmapping only index files) and "standard".
103
+ # (The buffer size settings that follow only apply to standard,
104
+ # non-mmapped i/o.)
105
+ disk_access_mode: auto
106
+
107
+ # Unlike most systems, in Cassandra writes are faster than reads, so
108
+ # you can afford more of those in parallel. A good rule of thumb is 2
109
+ # concurrent reads per processor core. Increase ConcurrentWrites to
110
+ # the number of clients writing at once if you enable CommitLogSync +
111
+ # CommitLogSyncDelay. -->
112
+ concurrent_reads: 8
113
+ concurrent_writes: 32
114
+
115
+ # This sets the amount of memtable flush writer threads. These will
116
+ # be blocked by disk io, and each one will hold a memtable in memory
117
+ # while blocked. If you have a large heap and many data directories,
118
+ # you can increase this value for better flush performance.
119
+ # By default this will be set to the amount of data directories defined.
120
+ #memtable_flush_writers: 1
121
+
122
+ # Buffer size to use when performing contiguous column slices.
123
+ # Increase this to the size of the column slices you typically perform
124
+ sliced_buffer_size_in_kb: 64
125
+
126
+ # TCP port, for commands and data
127
+ storage_port: 7000
128
+
129
+ # Address to bind to and tell other Cassandra nodes to connect to. You
130
+ # _must_ change this if you want multiple nodes to be able to
131
+ # communicate!
132
+ #
133
+ # Leaving it blank leaves it up to InetAddress.getLocalHost(). This
134
+ # will always do the Right Thing *if* the node is properly configured
135
+ # (hostname, name resolution, etc), and the Right Thing is to use the
136
+ # address associated with the hostname (it might not be).
137
+ #
138
+ # Setting this to 0.0.0.0 is always wrong.
139
+ listen_address: localhost
140
+
141
+ # The address to bind the Thrift RPC service to -- clients connect
142
+ # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
143
+ # you want Thrift to listen on all interfaces.
144
+ #
145
+ # Leaving this blank has the same effect it does for ListenAddress,
146
+ # (i.e. it will be based on the configured hostname of the node).
147
+ rpc_address: localhost
148
+ # port for Thrift to listen for clients on
149
+ rpc_port: 9160
150
+
151
+ # enable or disable keepalive on rpc connections
152
+ rpc_keepalive: true
153
+
154
+ # uncomment to set socket buffer sizes on rpc connections
155
+ # rpc_send_buff_size_in_bytes:
156
+ # rpc_recv_buff_size_in_bytes:
157
+
158
+ # Frame size for thrift (maximum field length).
159
+ # 0 disables TFramedTransport in favor of TSocket. This option
160
+ # is deprecated; we strongly recommend using Framed mode.
161
+ thrift_framed_transport_size_in_mb: 15
162
+
163
+ # The max length of a thrift message, including all fields and
164
+ # internal thrift overhead.
165
+ thrift_max_message_length_in_mb: 16
166
+
167
+ # Whether or not to take a snapshot before each compaction. Be
168
+ # careful using this option, since Cassandra won't clean up the
169
+ # snapshots for you. Mostly useful if you're paranoid when there
170
+ # is a data format change.
171
+ snapshot_before_compaction: false
172
+
173
+ # change this to increase the compaction thread's priority. In java, 1 is the
174
+ # lowest priority and that is our default.
175
+ # compaction_thread_priority: 1
176
+
177
+ # The threshold size in megabytes the binary memtable must grow to,
178
+ # before it's submitted for flushing to disk.
179
+ binary_memtable_throughput_in_mb: 256
180
+
181
+ # Add column indexes to a row after its contents reach this size.
182
+ # Increase if your column values are large, or if you have a very large
183
+ # number of columns. The competing causes are, Cassandra has to
184
+ # deserialize this much of the row to read a single column, so you want
185
+ # it to be small - at least if you do many partial-row reads - but all
186
+ # the index data is read for each access, so you don't want to generate
187
+ # that wastefully either.
188
+ column_index_size_in_kb: 64
189
+
190
+ # Size limit for rows being compacted in memory. Larger rows will spill
191
+ # over to disk and use a slower two-pass compaction process. A message
192
+ # will be logged specifying the row key.
193
+ in_memory_compaction_limit_in_mb: 64
194
+
195
+ # Time to wait for a reply from other nodes before failing the command
196
+ rpc_timeout_in_ms: 10000
197
+
198
+ # phi value that must be reached for a host to be marked down.
199
+ # most users should never need to adjust this.
200
+ # phi_convict_threshold: 8
201
+
202
+ # endpoint_snitch -- Set this to a class that implements
203
+ # IEndpointSnitch, which will let Cassandra know enough
204
+ # about your network topology to route requests efficiently.
205
+ # Out of the box, Cassandra provides
206
+ # - org.apache.cassandra.locator.SimpleSnitch:
207
+ # Treats Strategy order as proximity. This improves cache locality
208
+ # when disabling read repair, which can further improve throughput.
209
+ # - org.apache.cassandra.locator.RackInferringSnitch:
210
+ # Proximity is determined by rack and data center, which are
211
+ # assumed to correspond to the 3rd and 2nd octet of each node's
212
+ # IP address, respectively
213
+ # org.apache.cassandra.locator.PropertyFileSnitch:
214
+ # - Proximity is determined by rack and data center, which are
215
+ # explicitly configured in cassandra-topology.properties.
216
+ endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
217
+
218
+ # dynamic_snitch -- This boolean controls whether the above snitch is
219
+ # wrapped with a dynamic snitch, which will monitor read latencies
220
+ # and avoid reading from hosts that have slowed (due to compaction,
221
+ # for instance)
222
+ dynamic_snitch: true
223
+ # controls how often to perform the more expensive part of host score
224
+ # calculation
225
+ dynamic_snitch_update_interval_in_ms: 100
226
+ # controls how often to reset all host scores, allowing a bad host to
227
+ # possibly recover
228
+ dynamic_snitch_reset_interval_in_ms: 600000
229
+ # if set greater than zero and read_repair_chance is < 1.0, this will allow
230
+ # 'pinning' of replicas to hosts in order to increase cache capacity.
231
+ # The badness threshold will control how much worse the pinned host has to be
232
+ # before the dynamic snitch will prefer other replicas over it. This is
233
+ # expressed as a double which represents a percentage. Thus, a value of
234
+ # 0.2 means Cassandra would continue to prefer the static snitch values
235
+ # until the pinned host was 20% worse than the fastest.
236
+ dynamic_snitch_badness_threshold: 0.0
237
+
238
+ # request_scheduler -- Set this to a class that implements
239
+ # RequestScheduler, which will schedule incoming client requests
240
+ # according to the specific policy. This is useful for multi-tenancy
241
+ # with a single Cassandra cluster.
242
+ # NOTE: This is specifically for requests from the client and does
243
+ # not affect inter node communication.
244
+ # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
245
+ # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
246
+ # client requests to a node with a separate queue for each
247
+ # request_scheduler_id. The scheduler is further customized by
248
+ # request_scheduler_options as described below.
249
+ request_scheduler: org.apache.cassandra.scheduler.NoScheduler
250
+
251
+ # Scheduler Options vary based on the type of scheduler
252
+ # NoScheduler - Has no options
253
+ # RoundRobin
254
+ # - throttle_limit -- The throttle_limit is the number of in-flight
255
+ # requests per client. Requests beyond
256
+ # that limit are queued up until
257
+ # running requests can complete.
258
+ # The value of 80 here is twice the number of
259
+ # concurrent_reads + concurrent_writes.
260
+ # - default_weight -- default_weight is optional and allows for
261
+ # overriding the default which is 1.
262
+ # - weights -- Weights are optional and will default to 1 or the
263
+ # overridden default_weight. The weight translates into how
264
+ # many requests are handled during each turn of the
265
+ # RoundRobin, based on the scheduler id.
266
+ #
267
+ # request_scheduler_options:
268
+ # throttle_limit: 80
269
+ # default_weight: 5
270
+ # weights:
271
+ # Keyspace1: 1
272
+ # Keyspace2: 5
273
+
274
+ # request_scheduler_id -- An identifier based on which to perform
275
+ # the request scheduling. Currently the only valid option is keyspace.
276
+ # request_scheduler_id: keyspace
277
+
278
+ # The Index Interval determines how large the sampling of row keys
279
+ # is for a given SSTable. The larger the sampling, the more effective
280
+ # the index is at the cost of space.
281
+ index_interval: 128
282
+
283
+ # Keyspaces have ColumnFamilies. (Usually 1 KS per application.)
284
+ # ColumnFamilies have Rows. (Dozens of CFs per KS.)
285
+ # Rows contain Columns. (Many per CF.)
286
+ # Columns contain name:value:timestamp. (Many per Row.)
287
+ #
288
+ # A KS is most similar to a schema, and a CF is most similar to a relational table.
289
+ #
290
+ # Keyspaces, ColumnFamilies, and Columns may carry additional
291
+ # metadata that change their behavior. These are as follows:
292
+ #
293
+ # Keyspace required parameters:
294
+ # - name: name of the keyspace; "system" is
295
+ # reserved for Cassandra Internals.
296
+ # - replica_placement_strategy: the class that determines how replicas
297
+ # are distributed among nodes. Contains both the class as well as
298
+ # configuration information. Must extend AbstractReplicationStrategy.
299
+ # Out of the box, Cassandra provides
300
+ # * org.apache.cassandra.locator.SimpleStrategy
301
+ # * org.apache.cassandra.locator.NetworkTopologyStrategy
302
+ # * org.apache.cassandra.locator.OldNetworkTopologyStrategy
303
+ #
304
+ # SimpleStrategy merely places the first
305
+ # replica at the node whose token is closest to the key (as determined
306
+ # by the Partitioner), and additional replicas on subsequent nodes
307
+ # along the ring in increasing Token order.
308
+ #
309
+ # With NetworkTopologyStrategy,
310
+ # for each datacenter, you can specify how many replicas you want
311
+ # on a per-keyspace basis. Replicas are placed on different racks
312
+ # within each DC, if possible. This strategy also requires rack aware
313
+ # snitch, such as RackInferringSnitch or PropertyFileSnitch.
314
+ # An example:
315
+ # - name: Keyspace1
316
+ # replica_placement_strategy: org.apache.cassandra.locator.NetworkTopologyStrategy
317
+ # strategy_options:
318
+ # DC1 : 3
319
+ # DC2 : 2
320
+ # DC3 : 1
321
+ #
322
+ # OldNetworkTopologyStrategy [formerly RackAwareStrategy]
323
+ # places one replica in each of two datacenters, and the third on a
324
+ # different rack in the first. Additional datacenters are not
325
+ # guaranteed to get a replica. Additional replicas after three are placed
326
+ # in ring order after the third without regard to rack or datacenter.
327
+ # - replication_factor: Number of replicas of each row
328
+ # Keyspace optional parameters:
329
+ # - strategy_options: Additional information for the replication strategy.
330
+ # - column_families:
331
+ # ColumnFamily required parameters:
332
+ # - name: name of the ColumnFamily. Must not contain the character "-".
333
+ # - compare_with: tells Cassandra how to sort the columns for slicing
334
+ # operations. The default is BytesType, which is a straightforward
335
+ # lexical comparison of the bytes in each column. Other options are
336
+ # AsciiType, UTF8Type, LexicalUUIDType, TimeUUIDType, LongType,
337
+ # and IntegerType (a generic variable-length integer type).
338
+ # You can also specify the fully-qualified class name to a class of
339
+ # your choice extending org.apache.cassandra.db.marshal.AbstractType.
340
+ #
341
+ # ColumnFamily optional parameters:
342
+ # - keys_cached: specifies the number of keys per sstable whose
343
+ # locations we keep in memory in "mostly LRU" order. (JUST the key
344
+ # locations, NOT any column values.) Specify a fraction (value less
345
+ # than 1) or an absolute number of keys to cache. Defaults to 200000
346
+ # keys.
347
+ # - rows_cached: specifies the number of rows whose entire contents we
348
+ # cache in memory. Do not use this on ColumnFamilies with large rows,
349
+ # or ColumnFamilies with high write:read ratios. Specify a fraction
350
+ # (value less than 1) or an absolute number of rows to cache.
351
+ # Defaults to 0. (i.e. row caching is off by default)
352
+ # - comment: used to attach additional human-readable information about
353
+ # the column family to its definition.
354
+ # - read_repair_chance: specifies the probability with which read
355
+ # repairs should be invoked on non-quorum reads. must be between 0
356
+ # and 1. defaults to 1.0 (always read repair).
357
+ # - gc_grace_seconds: specifies the time to wait before garbage
358
+ # collecting tombstones (deletion markers). defaults to 864000 (10
359
+ # days). See http://wiki.apache.org/cassandra/DistributedDeletes
360
+ # - default_validation_class: specifies a validator class to use for
361
+ # validating all the column values in the CF.
362
+ # NOTE:
363
+ # min_ must be less than max_compaction_threshold!
364
+ # - min_compaction_threshold: the minimum number of SSTables needed
365
+ # to start a minor compaction. increasing this will cause minor
366
+ # compactions to start less frequently and be more intensive. setting
367
+ # this to 0 disables minor compactions. defaults to 4.
368
+ # - max_compaction_threshold: the maximum number of SSTables allowed
369
+ # before a minor compaction is forced. decreasing this will cause
370
+ # minor compactions to start more frequently and be less intensive.
371
+ # setting this to 0 disables minor compactions. defaults to 32.
372
+ # /NOTE
373
+ # - row_cache_save_period_in_seconds: number of seconds between saving
374
+ # row caches. The row caches can be saved periodically and if one
375
+ # exists on startup it will be loaded.
376
+ # - key_cache_save_period_in_seconds: number of seconds between saving
377
+ # key caches. The key caches can be saved periodically and if one
378
+ # exists on startup it will be loaded.
379
+ # - memtable_flush_after_mins: The maximum time to leave a dirty table
380
+ # unflushed. This should be large enough that it won't cause a flush
381
+ # storm of all memtables during periods of inactivity.
382
+ # - memtable_throughput_in_mb: The maximum size of the memtable before
383
+ # it is flushed. If undefined, 1/8 * heapsize will be used.
384
+ # - memtable_operations_in_millions: Number of operations in millions
385
+ # before the memtable is flushed. If undefined, throughput / 64 * 0.3
386
+ # will be used.
387
+ # - column_metadata:
388
+ # Column required parameters:
389
+ # - name: binds a validator (and optionally an indexer) to columns
390
+ # with this name in any row of the enclosing column family.
391
+ # - validator: like cf.compare_with, an AbstractType that checks
392
+ # that the value of the column is well-defined.
393
+ # Column optional parameters:
394
+ # NOTE:
395
+ # index_name cannot be set if index_type is not also set!
396
+ # - index_name: User-friendly name for the index.
397
+ # - index_type: The type of index to be created. Currently only
398
+ # KEYS is supported.
399
+ # /NOTE
400
+ #
401
+ # NOTE:
402
+ # this keyspace definition is for demonstration purposes only.
403
+ # Cassandra will not load these definitions during startup. See
404
+ # http://wiki.apache.org/cassandra/FAQ#no_keyspaces for an explanation.
405
+ # /NOTE
406
+ keyspaces:
407
+ - name: CassandraModel
408
+ replica_placement_strategy: org.apache.cassandra.locator.SimpleStrategy
409
+ replication_factor: 1
410
+ column_families:
411
+ - name: Users
412
+ compare_with: BytesType
413
+ keys_cached: 10000
414
+ rows_cached: 1000
415
+ row_cache_save_period_in_seconds: 0
416
+ key_cache_save_period_in_seconds: 3600
417
+ memtable_flush_after_mins: 59
418
+ memtable_throughput_in_mb: 255
419
+ memtable_operations_in_millions: 0.29
420
+
421
+ - name: Posts
422
+ compare_with: BytesType
423
+
424
+ - name: Comments
425
+ column_type: Super
426
+ compare_with: TimeUUIDType
427
+ compare_subcolumns_with: BytesType
@@ -14,8 +14,8 @@
14
14
  # See the License for the specific language governing permissions and
15
15
  # limitations under the License.
16
16
 
17
- # for production, you should probably set the root to INFO
18
- # and the pattern to %c instead of %l. (%l is slower.)
17
+ # for production, you should probably set pattern to %c instead of %l.
18
+ # (%l is slower.)
19
19
 
20
20
  # output messages into a rolling log file as well as stdout
21
21
  log4j.rootLogger=INFO,stdout,R
@@ -27,12 +27,12 @@ log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
27
27
 
28
28
  # rolling log file
29
29
  log4j.appender.R=org.apache.log4j.RollingFileAppender
30
- log4j.appender.file.maxFileSize=20MB
31
- log4j.appender.file.maxBackupIndex=50
30
+ log4j.appender.R.maxFileSize=20MB
31
+ log4j.appender.R.maxBackupIndex=50
32
32
  log4j.appender.R.layout=org.apache.log4j.PatternLayout
33
33
  log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
34
34
  # Edit the next line to point to your logs directory
35
- log4j.appender.R.File=data/logs/system.log
35
+ log4j.appender.R.File=/var/log/cassandra/system.log
36
36
 
37
37
  # Application logging options
38
38
  #log4j.logger.com.facebook=DEBUG
metadata CHANGED
@@ -4,9 +4,9 @@ version: !ruby/object:Gem::Version
4
4
  prerelease: false
5
5
  segments:
6
6
  - 0
7
- - 1
8
- - 1
9
- version: 0.1.1
7
+ - 2
8
+ - 0
9
+ version: 0.2.0
10
10
  platform: ruby
11
11
  authors:
12
12
  - Tien Le
@@ -14,7 +14,7 @@ autorequire:
14
14
  bindir: bin
15
15
  cert_chain: []
16
16
 
17
- date: 2010-12-22 00:00:00 +07:00
17
+ date: 2011-01-23 00:00:00 +07:00
18
18
  default_executable:
19
19
  dependencies:
20
20
  - !ruby/object:Gem::Dependency
@@ -69,8 +69,9 @@ files:
69
69
  - test/callbacks_test.rb
70
70
  - test/cassandra_model_test.rb
71
71
  - test/config/cassandra.in.sh
72
+ - test/config/cassandra.yaml
73
+ - test/config/log4j-server.properties
72
74
  - test/config/log4j-tools.properties
73
- - test/config/log4j.properties
74
75
  - test/config/storage-conf.xml
75
76
  - test/test_helper.rb
76
77
  has_rdoc: true