redis-dump 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGES.txt +5 -0
- data/LICENSE.txt +19 -0
- data/README.rdoc +29 -0
- data/Rakefile +64 -0
- data/VERSION.yml +5 -0
- data/lib/redis/dump.rb +201 -0
- data/try/10_redis_dump_try.rb +57 -0
- data/try/redis-server.conf +197 -0
- metadata +104 -0
data/CHANGES.txt
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,19 @@
Copyright (c) 2010 Solutious Inc, Delano Mandelbaum

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README.rdoc
ADDED
@@ -0,0 +1,29 @@
= Redis-Dump v0.1 ALPHA

<i>Backup and restore your Redis data to and from JSON.</i>

<b>NOTE: This is alpha software. DO NOT RELY ON IT!!</b>


== Installation

One of:

    $ gem install redis-dump


== More Info

* Codes[http://github.com/delano/redis-dump]


== Credits

* delano[http://github.com/delano]


== Thanks

* antirez and the funky redis bunch!
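For orientation, here is a minimal usage sketch based on the lib/redis/dump.rb code later in this diff. It is an illustration only: the output filename and the local Redis URI are assumptions, not part of the package.

require 'redis/dump'

rdump = Redis::Dump.new 0, 'redis://127.0.0.1:6379'

# Dump: each yielded record is one JSON object per key (db, key, ttl, type, value, size).
File.open('redis-db0.json', 'w') do |f|
  rdump.dump { |record| f.puts record }
end

# Load: in safe mode (the default) keys that already exist are skipped.
loaded = rdump.load File.read('redis-db0.json')
puts "Loaded #{loaded} records"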
data/Rakefile
ADDED
@@ -0,0 +1,64 @@
require "rubygems"
require "rake"
require "rake/clean"
require 'yaml'

begin
  require 'hanna/rdoctask'
rescue LoadError
  require 'rake/rdoctask'
end

config = YAML.load_file("VERSION.yml")
task :default => ["build"]
CLEAN.include [ 'pkg', 'doc' ]
name = "redis-dump"

begin
  require "jeweler"
  Jeweler::Tasks.new do |gem|
    gem.version = "#{config[:MAJOR]}.#{config[:MINOR]}.#{config[:PATCH]}"
    gem.name = "redis-dump"
    gem.rubyforge_project = gem.name
    gem.summary = "Backup and restore your Redis data to and from JSON."
    gem.description = gem.summary
    gem.email = "delano@solutious.com"
    gem.homepage = "http://github.com/delano/redis-dump"
    gem.authors = ["Delano Mandelbaum"]
    gem.add_dependency("yajl-ruby", ">= 0.1")
    gem.add_dependency("redis", ">= 2.0")
  end
  Jeweler::GemcutterTasks.new
rescue LoadError
  puts "Jeweler (or a dependency) not available. Install it with: sudo gem install jeweler"
end


Rake::RDocTask.new do |rdoc|
  version = "#{config[:MAJOR]}.#{config[:MINOR]}.#{config[:PATCH]}.#{config[:BUILD]}"
  rdoc.rdoc_dir = "doc"
  rdoc.title = "redis-dump #{version}"
  rdoc.rdoc_files.include("README*")
  rdoc.rdoc_files.include("LICENSE.txt")
  #rdoc.rdoc_files.include("bin/*.rb")
  rdoc.rdoc_files.include("lib/**/*.rb")
end


# Rubyforge Release / Publish Tasks ==================================

#about 'Publish website to rubyforge'
task 'publish:rdoc' => 'doc/index.html' do
  sh "scp -rp doc/* rubyforge.org:/var/www/gforge-projects/#{name}/"
end

#about 'Public release to rubyforge'
task 'publish:gem' => [:package] do |t|
  sh <<-end
  rubyforge add_release -o Any -a CHANGES.txt -f -n README.md #{name} #{name} #{@spec.version} pkg/#{name}-#{@spec.version}.gem &&
  rubyforge add_file -o Any -a CHANGES.txt -f -n README.md #{name} #{name} #{@spec.version} pkg/#{name}-#{@spec.version}.tgz
  end
end
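As a side note on the Rakefile above: the gem version string is assembled from VERSION.yml, whose body is not shown in this diff. A hedged sketch of that lookup, with illustrative stand-in values for the :MAJOR/:MINOR/:PATCH/:BUILD keys:

require 'yaml'
# Stand-in for config = YAML.load_file("VERSION.yml"); the values are assumptions.
config = { :MAJOR => 0, :MINOR => 1, :PATCH => 0, :BUILD => 0 }
gem_version  = "#{config[:MAJOR]}.#{config[:MINOR]}.#{config[:PATCH]}"                    # "0.1.0"
rdoc_version = "#{config[:MAJOR]}.#{config[:MINOR]}.#{config[:PATCH]}.#{config[:BUILD]}"  # e.g. "0.1.0.0"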
data/VERSION.yml
ADDED
data/lib/redis/dump.rb
ADDED
@@ -0,0 +1,201 @@
require 'redis'
require 'yajl'

class Redis
  class Dump
    unless defined?(Redis::Dump::VALID_TYPES)
      VALID_TYPES = ['string', 'set', 'list', 'zset', 'hash', 'none'].freeze
    end
    @debug = false
    @encoder = Yajl::Encoder.new
    @parser = Yajl::Parser.new
    @safe = true
    class << self
      attr_accessor :debug, :encoder, :parser, :safe
      def ld(msg)
        STDERR.puts "#{'%.4f' % Time.now.utc.to_f}: #{msg}" if @debug
      end
    end
    attr_accessor :dbs, :uri
    attr_reader :redis_connections
    def initialize(dbs=nil,uri="redis://127.0.0.1:6379")
      @redis_connections = {}
      @uri = uri
      unless dbs.nil?
        @dbs = Range === dbs ? dbs : (dbs..dbs)
        @dbs = (@dbs.first.to_i..@dbs.last.to_i) # enforce integers
        open_all_connections
      end
    end
    def open_all_connections
      dbs.to_a.each { |db| redis(db) } unless dbs.nil?
    end
    def redis(db)
      redis_connections["#{uri}/#{db}"] ||= connect("#{uri}/#{db}")
    end
    def connect(this_uri)
      self.class.ld 'CONNECT: ' << this_uri
      Redis.connect :url => this_uri
    end
    def each_key &blk
      @redis_connections.keys.sort.each do |redis_uri|
        self.class.ld ['---', "DB: #{redis_connections[redis_uri].client.db}", '---'].join($/)
        keys = redis_connections[redis_uri].keys
        keys.each do |key|
          blk.call redis_connections[redis_uri], key
        end
      end
    end
    def dump(&each_record)
      values = []
      each_key do |this_redis,key|
        info = Redis::Dump.dump this_redis, key
        #self.class.ld " #{key} (#{info[:type]}): #{info[:size].to_bytes}"
        encoded = self.class.encoder.encode info
        each_record.nil? ? (values << encoded) : each_record.call(encoded)
      end
      values
    end
    def report(&each_record)
      values = []
      each_key do |this_redis,key|
        info = Redis::Dump.report this_redis, key
        #self.class.ld " #{key} (#{info[:type]}): #{info[:size].to_bytes}"
        each_record.nil? ? (values << info) : each_record.call(info)
      end
      values
    end
    def load(string_or_stream, &each_record)
      count = 0
      Redis::Dump.ld " LOAD SOURCE: #{string_or_stream}"
      Redis::Dump.parser.parse string_or_stream do |obj|
        unless @dbs.member?(obj["db"].to_i)
          Redis::Dump.ld "db out of range: #{obj["db"]}"
          next
        end
        this_redis = redis(obj["db"])
        Redis::Dump.ld "load[#{this_redis.hash}, #{obj}]"
        if each_record.nil?
          if Redis::Dump.safe && this_redis.exists(obj['key'])
            Redis::Dump.ld " record exists (no change)"
            next
          end
          Redis::Dump.set_value this_redis, obj['key'], obj['type'], obj['value'], obj['ttl']
        else
          each_record.call obj
        end
        count += 1
      end
      count
    end
    module ClassMethods
      def type(this_redis, key)
        type = this_redis.type key
        raise TypeError, "Unknown type: #{type}" if !VALID_TYPES.member?(type)
        type
      end
      def report(this_redis, key)
        info = { 'db' => this_redis.client.db, 'key' => key }
        info['type'] = type(this_redis, key)
        info['size'] = stringify(this_redis, key, info['type'], info['value']).size
        info['bytes'] = info['size'].to_bytes
        ld "report[#{this_redis.hash}, #{info}]"
        info
      end
      def dump(this_redis, key)
        info = { 'db' => this_redis.client.db, 'key' => key }
        info['ttl'] = this_redis.ttl key
        info['type'] = type(this_redis, key)
        info['value'] = value(this_redis, key, info['type'])
        info['size'] = stringify(this_redis, key, info['type'], info['value']).size
        ld "dump[#{this_redis.hash}, #{info}]"
        info
      end
      def set_value(this_redis, key, type, value, expire=nil)
        t ||= type
        send("set_value_#{t}", this_redis, key, value)
        this_redis.expire key, expire if expire.to_s.to_i > 0
      end
      def value(this_redis, key, t=nil)
        t ||= type
        send("value_#{t}", this_redis, key)
      end
      def stringify(this_redis, key, t=nil, v=nil)
        t ||= type
        send("stringify_#{t}", this_redis, key, v)
      end

      def set_value_hash(this_redis, key, hash)
        hash.keys.each { |k| this_redis.hset key, k, hash[k] }
      end
      def set_value_list(this_redis, key, list)
        list.each { |value| this_redis.rpush key, value }
      end
      def set_value_set(this_redis, key, set)
        set.each { |value| this_redis.sadd key, value }
      end
      def set_value_zset(this_redis, key, zset)
        zset.each { |pair| this_redis.zadd key, pair[1].to_f, pair[0] }
      end
      def set_value_string(this_redis, key, str)
        this_redis.set key, str
      end

      def value_string(this_redis, key) this_redis.get key end
      def value_list (this_redis, key) this_redis.lrange key, 0, -1 end
      def value_set  (this_redis, key) this_redis.smembers key end
      def value_zset (this_redis, key)
        zset = this_redis.zrange(key, 0, -1, :with_scores => true).tuple
      end
      def value_hash (this_redis, key) this_redis.hgetall(key) end
      def value_none (this_redis, key) '' end
      def stringify_string(this_redis, key, v=nil) (v || value_string(this_redis, key)) end
      def stringify_list (this_redis, key, v=nil) (v || value_list(this_redis, key)).join end
      def stringify_set  (this_redis, key, v=nil) (v || value_set(this_redis, key)).join end
      def stringify_zset (this_redis, key, v=nil) (v || value_zset(this_redis, key)).flatten.compact.join end
      def stringify_hash (this_redis, key, v=nil) (v || value_hash(this_redis, key)).to_a.flatten.compact.join end
      def stringify_none (this_redis, key, v=nil) (v || '') end
    end
    extend Redis::Dump::ClassMethods
  end
end

class Array
  def chunk(number_of_chunks)
    chunks = (1..number_of_chunks).collect { [] }
    chunks.each do |a_chunk|
      a_chunk << self.shift if self.any?
    end
    chunks
  end
  alias / chunk
  def tuple(tuple_size=2)
    tuples = (1..(size/tuple_size)).collect { [] }
    tuples.each_with_index do |a_tuple,idx|
      tuple_size.times { a_tuple << self.shift } if self.any?
    end
    tuples
  end
end

class Numeric
  def to_ms
    (self*1000).to_i
  end

  # TODO: Use 1024?
  def to_bytes
    args = case self.abs.to_i
    when (1000)..(1000**2)
      '%3.2f%s' % [(self / 1000.to_f).to_s, 'KB']
    when (1000**2)..(1000**3)
      '%3.2f%s' % [(self / (1000**2).to_f).to_s, 'MB']
    when (1000**3)..(1000**4)
      '%3.2f%s' % [(self / (1000**3).to_f).to_s, 'GB']
    when (1000**4)..(1000**6)
      '%3.2f%s' % [(self / (1000**4).to_f).to_s, 'TB']
    else
      [self, 'B'].join
    end
  end
end
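For reference, the shape of a single dumped record as built by the class-level dump method (Redis::Dump.dump) above; the values here are illustrative. Redis::Dump#dump encodes each such hash with Yajl into one JSON object and, when called without a block, returns an array of those JSON strings.

record = {
  'db'    => 0,            # database number the key was read from
  'key'   => 'stringkey',  # the Redis key
  'ttl'   => 100,          # -1 when no expiry is set
  'type'  => 'string',     # one of VALID_TYPES
  'value' => 'stringvalue',
  'size'  => 11            # size of the stringified value
}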
data/try/10_redis_dump_try.rb
ADDED
@@ -0,0 +1,57 @@
require 'lib/redis/dump'

@uri_base = "redis://127.0.0.1:6371"

Redis::Dump.debug = false
Redis::Dump.safe = true

## Connect to DB
@rdump = Redis::Dump.new 0, @uri_base
@rdump.redis_connections.size
#=> 1

## Populate
@rdump.redis(0).set 'stringkey', 'stringvalue'
@rdump.redis(0).expire 'stringkey', 100
@rdump.redis(0).hset 'hashkey', 'field_a', 'value_a'
@rdump.redis(0).hset 'hashkey', 'field_b', 'value_b'
@rdump.redis(0).hset 'hashkey', 'field_c', 'value_c'
3.times { |idx| @rdump.redis(0).rpush 'listkey', "value_#{idx}" }
4.times { |idx| @rdump.redis(0).sadd 'setkey', "value_#{idx}" }
5.times { |idx| @rdump.redis(0).zadd 'zsetkey', idx.zero? ? 100 : 100*idx, "value_#{idx}" }
@rdump.redis(0).keys.size
#=> 5

## Can dump
@values = @rdump.dump
@values.size
#=> 5

# Clear DB 0
db0 = Redis::Dump.new 0, @uri_base
db0.redis(0).flushdb
db0.redis(0).keys.size
#=> 0

## Can load data
@rdump.load @values.join
@rdump.redis(0).keys.size
#=> 5

## DB 0 content matches previous dump content
values = @rdump.dump
values.sort
#=> @values.sort

## Won't load data in safe mode if records exist
@rdump.load @values.join
#=> 0

## Will load data if records exist and safe mode is disabled
Redis::Dump.safe = false
@rdump.load @values.join
#=> 5

Redis::Dump.safe = true
db0 = Redis::Dump.new 0, @uri_base
db0.redis(0).flushdb
data/try/redis-server.conf
ADDED
@@ -0,0 +1,197 @@
# REDIS-DUMP TEST CONFIG -- 2010-11-15

# Usage:
#
#     $ redis-server try/redis-server.conf

dir /tmp

pidfile redisdump-test.pid
logfile redisdump-test.log
dbfilename redisdump-test.rdb

port 6371
bind 127.0.0.1
daemonize yes

timeout 300

#loglevel debug
#loglevel verbose
loglevel warning

databases 16

save 900 100
save 300 5000


rdbcompression yes

# requirepass foobared
# maxclients 0

appendonly no
appendfilename redisdump-test.aof

# TODO: Consider having separate configs when the usecase for Redis
# changes. For example, one for production, another for batch processing.
#
# Nothing is changed from here on out:

################################## INCLUDES ###################################

# Include one or more other config files here. This is useful if you
# have a standard template that goes to all redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf

################################# REPLICATION #################################

# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>

# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>


# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only if one second passed since the last fsync. Compromise.
#
# The default is "everysec" that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# If unsure, use "everysec".

# appendfsync always
appendfsync everysec
# appendfsync no

################################ VIRTUAL MEMORY ###############################

# Virtual Memory allows Redis to work with datasets bigger than the actual
# amount of RAM needed to hold the whole dataset in memory.
# In order to do so very used keys are taken in memory while the other keys
# are swapped into a swap file, similarly to what operating systems do
# with memory pages.
#
# To enable VM just set 'vm-enabled' to yes, and set the following three
# VM parameters accordingly to your needs.

vm-enabled no
# vm-enabled yes

# This is the path of the Redis swap file. As you can guess, swap files
# can't be shared by different Redis instances, so make sure to use a swap
# file for every redis process you are running. Redis will complain if the
# swap file is already in use.
#
# The best kind of storage for the Redis swap file (that's accessed at random)
# is a Solid State Disk (SSD).
#
# *** WARNING *** if you are using a shared hosting the default of putting
# the swap file under /tmp is not secure. Create a dir with access granted
# only to Redis user and configure Redis to create the swap file there.
vm-swap-file /tmp/redis.swap

# vm-max-memory configures the VM to use at max the specified amount of
# RAM. Everything that deos not fit will be swapped on disk *if* possible, that
# is, if there is still enough contiguous space in the swap file.
#
# With vm-max-memory 0 the system will swap everything it can. Not a good
# default, just specify the max amount of RAM you can in bytes, but it's
# better to leave some margin. For instance specify an amount of RAM
# that's more or less between 60 and 80% of your free RAM.
vm-max-memory 0

# Redis swap files is split into pages. An object can be saved using multiple
# contiguous pages, but pages can't be shared between different objects.
# So if your page is too big, small objects swapped out on disk will waste
# a lot of space. If you page is too small, there is less space in the swap
# file (assuming you configured the same number of total swap file pages).
#
# If you use a lot of small objects, use a page size of 64 or 32 bytes.
# If you use a lot of big objects, use a bigger page size.
# If unsure, use the default :)
vm-page-size 32

# Number of total memory pages in the swap file.
# Given that the page table (a bitmap of free/used pages) is taken in memory,
# every 8 pages on disk will consume 1 byte of RAM.
#
# The total swap size is vm-page-size * vm-pages
#
# With the default of 32-bytes memory pages and 134217728 pages Redis will
# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
#
# It's better to use the smallest acceptable value for your application,
# but the default is large in order to work in most conditions.
vm-pages 134217728

# Max number of VM I/O threads running at the same time.
# This threads are used to read/write data from/to swap file, since they
# also encode and decode objects from disk to memory or the reverse, a bigger
# number of threads can help with big objects even if they can't help with
# I/O itself as the physical device may not be able to couple with many
# reads/writes operations at the same time.
#
# The special value of 0 turn off threaded I/O and enables the blocking
# Virtual Memory implementation.
vm-max-threads 4

############################### ADVANCED CONFIG ###############################

# Glue small output buffers together in order to send small replies in a
# single TCP packet. Uses a bit more CPU but most of the times it is a win
# in terms of number of queries per second. Use 'yes' if unsure.
glueoutputbuf yes

# Hashes are encoded in a special way (much more memory efficient) when they
# have at max a given numer of elements, and the biggest element does not
# exceed a given threshold. You can configure this limits with the following
# configuration directives.
hash-max-zipmap-entries 64
hash-max-zipmap-value 512

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into an hash table
# that is rhashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# active rehashing the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply form time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
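To tie this config back to the tryouts file: the server it starts listens on 127.0.0.1:6371, which matches @uri_base in try/10_redis_dump_try.rb. A hedged sketch of connecting to that test server once it is running (the ping check is illustrative):

require 'redis/dump'

rdump = Redis::Dump.new 0, 'redis://127.0.0.1:6371'
rdump.redis(0).ping  #=> "PONG" when the test server is up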
metadata
ADDED
@@ -0,0 +1,104 @@
--- !ruby/object:Gem::Specification
name: redis-dump
version: !ruby/object:Gem::Version
  hash: 27
  prerelease: false
  segments:
  - 0
  - 1
  - 0
  version: 0.1.0
platform: ruby
authors:
- Delano Mandelbaum
autorequire:
bindir: bin
cert_chain: []

date: 2010-11-15 00:00:00 -05:00
default_executable:
dependencies:
- !ruby/object:Gem::Dependency
  name: yajl-ruby
  prerelease: false
  requirement: &id001 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        hash: 9
        segments:
        - 0
        - 1
        version: "0.1"
  type: :runtime
  version_requirements: *id001
- !ruby/object:Gem::Dependency
  name: redis
  prerelease: false
  requirement: &id002 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        hash: 3
        segments:
        - 2
        - 0
        version: "2.0"
  type: :runtime
  version_requirements: *id002
description: Backup and restore your Redis data to and from JSON.
email: delano@solutious.com
executables: []

extensions: []

extra_rdoc_files:
- LICENSE.txt
- README.rdoc
files:
- CHANGES.txt
- LICENSE.txt
- README.rdoc
- Rakefile
- VERSION.yml
- lib/redis/dump.rb
- try/10_redis_dump_try.rb
- try/redis-server.conf
has_rdoc: true
homepage: http://github.com/delano/redis-dump
licenses: []

post_install_message:
rdoc_options:
- --charset=UTF-8
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      hash: 3
      segments:
      - 0
      version: "0"
required_rubygems_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      hash: 3
      segments:
      - 0
      version: "0"
requirements: []

rubyforge_project: redis-dump
rubygems_version: 1.3.7
signing_key:
specification_version: 3
summary: Backup and restore your Redis data to and from JSON.
test_files: []