dk-bdb 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +20 -0
- data/README.md +102 -0
- data/VERSION +1 -0
- data/examples/replication.rb +29 -0
- data/ext/bdb.c +3421 -0
- data/ext/bdb.h +104 -0
- data/ext/extconf.rb +59 -0
- data/lib/bdb/base.rb +68 -0
- data/lib/bdb/database.rb +207 -0
- data/lib/bdb/environment.rb +135 -0
- data/lib/bdb/partitioned_database.rb +74 -0
- data/lib/bdb/replication.rb +68 -0
- data/lib/bdb/result_set.rb +41 -0
- data/test/benchmark.rb +31 -0
- data/test/cursor_test.rb +150 -0
- data/test/database_test.rb +18 -0
- data/test/database_test_helper.rb +37 -0
- data/test/db_test.rb +157 -0
- data/test/deadlock_test.rb +125 -0
- data/test/env_test.rb +101 -0
- data/test/replication_test.rb +47 -0
- data/test/stat_test.rb +22 -0
- data/test/test_helper.rb +14 -0
- data/test/txn_test.rb +74 -0
- metadata +92 -0
data/ext/bdb.h
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
|
|
2
|
+
/*
 * bdb.h: shared declarations for the Berkeley DB Ruby extension.
 * The handle structs pair raw DB_ENV/DB/DBC/DB_TXN pointers with the Ruby
 * VALUEs that own them so related objects can be tracked together.
 */
#ifndef BDB2_H
#define BDB2_H

#include <ruby.h>

/* Ruby's headers may define these as macros; drop them so the Berkeley DB
 * struct members / functions of the same name remain usable. */
#ifdef stat
#undef stat
#endif

#ifdef close
#undef close
#endif

#ifdef rename
#undef rename
#endif

//#include <version.h>
#include <db.h>

/* Passed where a DB_TXN* is expected but no transaction is in effect. */
#define NOTXN NULL

/* Largest file-descriptor count the platform exposes. */
#ifdef OPEN_MAX
#define LMAXFD OPEN_MAX
#else
#ifdef FOPEN_MAX
#define LMAXFD FOPEN_MAX
#endif
#endif
#ifndef LMAXFD
#error "No max fd define available."
#endif

/* Maximum stored database-filename length; buffers are FNLEN+1 bytes. */
#define FNLEN 40

/*
 * FIX: the original strncpy-only macros left the destination without a
 * terminating NUL whenever the source was FNLEN bytes or longer
 * (strncpy does not terminate in that case). Both destinations in this
 * header (t_dbh.filename, t_dbch.filename) are char[FNLEN+1], so we can
 * always terminate at index FNLEN.
 */
#define filename_copy(fp,fv) \
  { strncpy((fp),RSTRING_PTR(fv),FNLEN); (fp)[FNLEN] = '\0'; }

#define filename_dup(fpd,fps) \
  { strncpy((fpd),(fps),FNLEN); (fpd)[FNLEN] = '\0'; }

typedef struct s_envh {
  VALUE self;
  DB_ENV *env;
  VALUE adb; /* Ruby array holding opened databases */
  VALUE atxn; /* Ruby array holding open transactions */
} t_envh;

typedef struct s_dbh {
  VALUE self;
  DB *db;
  int db_opened;  /* non-zero once the underlying DB has been opened */
  VALUE aproc;    /* presumably the associate (secondary index) callback — verify in bdb.c */
  VALUE sproc; /* key sorting callback */

  t_envh *env; /* Parent environment, NULL if not opened from one */
  VALUE adbc; /* Ruby array holding opened cursor */
  char filename[FNLEN+1];
} t_dbh;

typedef struct s_dbch {
  VALUE self;
  DBC *dbc;
  t_dbh *db;      /* database this cursor belongs to */
  char filename[FNLEN+1];
} t_dbch;

typedef struct s_txnh {
  VALUE self;
  DB_TXN *txn;
  t_envh *env;    /* owning environment */
} t_txnh;

/* Register C macro m as an unsigned-int constant on Ruby module/class b. */
#define cu(b,m) \
  rb_define_const(b,#m,UINT2NUM(m))

/* Register C macro m as a signed-int constant. */
#define ci(b,m) \
  rb_define_const(b,#m,INT2NUM(m))

/* Register C string macro m as a String constant. */
#define cs(b,m) \
  rb_define_const(b,#m,rb_str_new2(m))

/* Define db_<fname>= : stores v in ivar fv_<fname> and returns the object. */
#define simple_set(fname) \
  VALUE db_ ## fname ## _eq(VALUE obj, VALUE v) \
  { \
    rb_ivar_set(obj,fv_ ## fname,v); \
    return obj; \
  }

/* Generic ivar writer generator (Ruby attr_writer analogue). */
#define attr_writer(fname) \
  VALUE fname ## _writer(VALUE obj, VALUE v) \
  { \
    rb_ivar_set(obj,fv_ ## fname,v); \
    return obj; \
  }

/* Generic ivar reader generator (Ruby attr_reader analogue). */
#define attr_reader(fname) \
  VALUE fname ## _reader(VALUE obj) \
  { \
    return rb_ivar_get(obj,fv_ ## fname); \
  }

#endif
|
data/ext/extconf.rb
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
#!/usr/bin/env ruby
# extconf.rb: locates a Berkeley DB installation, generates bdb_aux._c
# (cs/cu/ci constant-registration calls mirroring the DB_*/DBC_* macros in
# db.h — see bdb.h), and writes the Makefile for the bdb extension.
require 'mkmf'

default_dir = '/usr/local/BerkeleyDB.4.8'
inc, lib = dir_config('db', "#{default_dir}/include", "#{default_dir}/lib")

# Probe for a linkable Berkeley DB library, newest version first.
versions = %w(db-4.8 db-4.7 db-4.6 db-4.5 db-4.4 db-4.3 db-4.2)
lib_ok = versions.any? {|v| have_library(v, 'db_version', 'db.h')}

# Scan db.h for DB_*/DBC_* #defines and emit a constant-registration line
# for each one that the compiler confirms is defined.
def create_header
  if File.exist?("bdb_aux._c")
    message("Not writing bdb_aux._c (defines), already exists\n")
    return
  end

  message("Writing bdb_aux._c (defines), this takes a while\n")
  # Find the db.h that the configured -I flags point at.
  # FIX: File.exists? is deprecated (removed in Ruby 3.2); use File.exist?
  # to match the check at the top of this method.
  db_header = $CPPFLAGS.split.select {|f| f =~ /^-I/ }.map {|e|
    f = File.join(e[2..-1], 'db.h')
    File.exist?(f) ? f : nil
  }.compact.first
  abort "db.h not found in any -I include path" if db_header.nil?

  n = 0
  File.open(db_header) {|fd|
    File.open("bdb_aux._c", "w") {|hd|
      hd.puts("/* This file automatically generated by extconf.rb */\n")
      fd.each_line {|l|
        if l =~ %r{^#define\s+(DBC?_\w*)\s+([^\/]*)\s*(.*?)(\/\*.*)?$}
          name = $1
          value = $2
          if macro_defined?(name, "#include <db.h>")
            case value
            when /^"/            # string constant
              hd.print(%Q{cs(mBdb,%s);\n} % [name])
            when /^\(?(0x|\d)/   # non-negative numeric constant
              hd.print(%Q{cu(mBdb,%s);\n} % [name])
            when /^\(?-/         # negative numeric constant
              hd.print(%Q{ci(mBdb,%s);\n} % [name])
            else
              $stderr.puts "don't know how to handle #{name} #{value.strip}, guessing UINT"
              hd.print(%Q{cu(mBdb,%s);\n} % [name])
            end
            n += 1
          end
        end
      }
    }
    message("\nwrote #{n} defines\n")
  }
end

if lib_ok
  create_header
  create_makefile('bdb')
else
  $stderr.puts("cannot create Makefile")
  exit 1
end
|
data/lib/bdb/base.rb
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
require 'bdb'
|
|
2
|
+
require 'tuple'
|
|
3
|
+
require 'bdb/environment'
|
|
4
|
+
require 'bdb/result_set'
|
|
5
|
+
|
|
6
|
+
# Shared behavior for Bdb::Database and Bdb::PartitionedDatabase:
# configuration handling, secondary-index registration, and delegation of
# transaction/locking concerns to the shared Bdb::Environment.
class Bdb::Base
  attr_reader :indexes

  def initialize(opts)
    @config  = Bdb::Environment.config.merge(opts)
    @indexes = {}
  end

  # Merge additional settings into this instance's config; returns the
  # full config hash.
  def config(config = {})
    @config.merge!(config)
  end

  # Register a secondary index on +field+. Raises if one is already defined.
  def index_by(field, opts = {})
    raise "index on #{field} already exists" if indexes[field]
    indexes[field] = opts
  end

  # Directory holding the database files; defaults to the working directory.
  def path
    config[:path] || Dir.pwd
  end

  # The (per-path, shared) environment this database belongs to.
  def environment
    @environment ||= Bdb::Environment.new(path, self)
  end

  def transaction(nested = true, &block)
    environment.transaction(nested, &block)
  end

  def synchronize(&block)
    environment.synchronize(&block)
  end

  def checkpoint(opts = {})
    environment.checkpoint(opts)
  end

  def master?
    environment.master?
  end

  private

  # Fetch +field+ from a record, whether it is a Hash or a model object.
  def get_field(field, value)
    if value.kind_of?(Hash)
      value[field]
    else
      value.send(field)
    end
  end
end
|
|
52
|
+
|
|
53
|
+
# Records fetched through Bdb::Database get their locator key (the decoded
# primary or index key they were found under) attached via this accessor.
class Object
  attr_accessor :bdb_locator_key
end
|
|
56
|
+
|
|
57
|
+
# Array comparison should try Tuple comparison first.
# Falls back to the stock Array#<=> when either side cannot be Tuple-encoded.
class Array
  original_cmp = instance_method(:<=>)

  define_method(:<=>) do |other|
    begin
      Tuple.dump(self) <=> Tuple.dump(other)
    rescue TypeError
      original_cmp.bind(self).call(other)
    end
  end
end
|
data/lib/bdb/database.rb
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
require 'bdb/base'
|
|
2
|
+
|
|
3
|
+
# A single Berkeley DB btree plus any secondary-index databases registered
# with index_by. Keys are Tuple-encoded; values are Marshal-dumped.
class Bdb::Database < Bdb::Base
  def initialize(name, opts = {})
    @name = name
    super(opts)
  end
  attr_reader :name

  # Returns the open handle for +index+ (an indexed field name), or the
  # primary database when +index+ is nil. Opens everything lazily on first
  # use: the primary db and one associated secondary db per registered index.
  def db(index = nil)
    if @db.nil?
      @db = {}
      open_flags = master? ? Bdb::DB_CREATE : Bdb::DB_RDONLY
      transaction(false) do
        primary_db = environment.env.db
        primary_db.pagesize = config[:page_size] if config[:page_size]
        primary_db.open(transaction, name, nil, Bdb::Db::BTREE, open_flags, 0)
        @db[:primary_key] = primary_db

        indexes.each do |field, opts|
          index_callback = lambda do |db, key, data|
            value = Marshal.load(data)
            # FIX: was `value[:field]`, which looked up the literal symbol
            # :field on every Hash record instead of the indexed field.
            # get_field (from Bdb::Base) applies the intended
            # Hash-or-object lookup for +field+.
            index_key = get_field(field, value)
            if opts[:multi_key] and index_key.kind_of?(Array)
              # Index multiple keys. If the key is an array, you must wrap it with an outer array.
              index_key.collect {|k| Tuple.dump(k)}
            elsif index_key
              # Index a single key.
              Tuple.dump(index_key)
            end
          end
          index_db = environment.env.db
          index_db.flags = Bdb::DB_DUPSORT unless opts[:unique]
          index_db.pagesize = config[:page_size] if config[:page_size]
          index_db.open(transaction, "#{name}_by_#{field}", nil, Bdb::Db::BTREE, open_flags, 0)
          primary_db.associate(transaction, index_db, open_flags, index_callback)
          @db[field] = index_db
        end
      end
    end
    @db[index || :primary_key]
  rescue Bdb::DbError => e
    # Retry if the database doesn't exist and we are a replication client.
    if not master? and e.code == Errno::ENOENT::Errno
      close
      sleep 1
      retry
    else
      raise(e)
    end
  end

  # Close all open handles (primary and secondaries).
  def close
    return unless @db
    synchronize do
      @db.each {|field, db| db.close(0)}
      @db = nil
    end
  end

  def close_environment
    environment.close
  end

  # Number of records whose +field+ index matches +key+.
  def count(field, key)
    with_cursor(db(field)) do |cursor|
      k, v = cursor.get(Tuple.dump(key), nil, Bdb::DB_SET)
      k ? cursor.count : 0
    end
  end

  # Fetch records by one or more keys. Each key may be a concrete key, a
  # Range, or :all. Trailing-hash options: :field (look up via a secondary
  # index), :reverse, :modify (take write locks), :partial (prefix match),
  # plus Bdb::ResultSet options such as :limit.
  def get(*keys, &block)
    opts = keys.last.kind_of?(Hash) ? keys.pop : {}
    db = db(opts[:field])
    set = Bdb::ResultSet.new(opts, &block)
    flags = opts[:modify] ? Bdb::DB_RMW : 0
    flags = 0 if environment.disable_transactions?

    keys.each do |key|
      key = get_key(key, opts)
      if key == :all
        with_cursor(db) do |cursor|
          if opts[:reverse]
            k,v = cursor.get(nil, nil, Bdb::DB_LAST | flags) # Start at the last item.
            iter = lambda {cursor.get(nil, nil, Bdb::DB_PREV | flags)} # Move backward.
          else
            k,v = cursor.get(nil, nil, Bdb::DB_FIRST | flags) # Start at the first item.
            iter = lambda {cursor.get(nil, nil, Bdb::DB_NEXT | flags)} # Move forward.
          end

          while k
            set << unmarshal(v, :tuple => k)
            k,v = iter.call
          end
        end
      elsif key.kind_of?(Range)
        # Fetch a range of keys.
        with_cursor(db) do |cursor|
          first = Tuple.dump(key.first)
          last = Tuple.dump(key.last)

          # Return false once we pass the end of the range.
          cond = key.exclude_end? ? lambda {|k| k < last} : lambda {|k| k <= last}
          if opts[:reverse]
            iter = lambda {cursor.get(nil, nil, Bdb::DB_PREV | flags)} # Move backward.

            # Position the cursor at the end of the range.
            k,v = cursor.get(last, nil, Bdb::DB_SET_RANGE | flags) || cursor.get(nil, nil, Bdb::DB_LAST | flags)
            while k and not cond.call(k)
              k,v = iter.call
            end

            cond = lambda {|k| k >= first} # Change the condition to stop when we move past the start.
          else
            k,v = cursor.get(first, nil, Bdb::DB_SET_RANGE | flags) # Start at the beginning of the range.
            iter = lambda {cursor.get(nil, nil, Bdb::DB_NEXT | flags)} # Move forward.
          end

          while k and cond.call(k)
            set << unmarshal(v, :tuple => k)
            k,v = iter.call
          end
        end
      else
        if (db.flags & Bdb::DB_DUPSORT) == 0
          synchronize do
            # There can only be one item for each key.
            data = db.get(transaction, Tuple.dump(key), nil, flags)
            set << unmarshal(data, :key => key) if data
          end
        else
          # Have to use a cursor because there may be multiple items with each key.
          with_cursor(db) do |cursor|
            k,v = cursor.get(Tuple.dump(key), nil, Bdb::DB_SET | flags)
            while k
              set << unmarshal(v, :tuple => k)
              k,v = cursor.get(nil, nil, Bdb::DB_NEXT_DUP | flags)
            end
          end
        end
      end
    end
    set.results
  rescue Bdb::ResultSet::LimitReached
    set.results
  end

  def [](key)
    get(key).first
  end

  # Store +value+ under +key+. With :create => true, refuses to overwrite an
  # existing record. Returns the marshalled value.
  def set(key, value, opts = {})
    synchronize do
      key = Tuple.dump(key)
      value = Marshal.dump(value)
      flags = opts[:create] ? Bdb::DB_NOOVERWRITE : 0
      db.put(transaction, key, value, flags)
      value
    end
  end

  def delete(key)
    synchronize do
      key = Tuple.dump(key)
      db.del(transaction, key, 0)
    end
  end

  # Deletes all records in the database. Beware!
  def truncate!
    synchronize do
      db.truncate(transaction)
    end
  end

  # Flush dirty pages to disk.
  def sync
    db.sync
  end

  private

  # Expand a key into a covering Range when :partial (prefix match) is set.
  def get_key(key, opts)
    if opts[:partial] and not key.kind_of?(Range) and not key == :all
      first = [*key]
      last = first + [true]
      key = first..last
    end
    key
  end

  # Marshal-load a stored value and attach its decoded locator key.
  def unmarshal(value, opts = {})
    value = Marshal.load(value)
    value.bdb_locator_key = opts[:tuple] ? Tuple.load(opts[:tuple]) : [*opts[:key]]
    value
  end

  # Open a cursor on +db+ inside the synchronization lock; always closes it.
  def with_cursor(db)
    synchronize do
      begin
        cursor = db.cursor(transaction, 0)
        yield(cursor)
      ensure
        cursor.close if cursor
      end
    end
  end
end
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
require 'thread'
|
|
2
|
+
require 'bdb/replication'
|
|
3
|
+
|
|
4
|
+
# Wraps a Berkeley DB environment (DB_ENV). One instance exists per
# expanded filesystem path, shared by every database opened under it;
# it owns the transaction stack and the cross-thread lock.
class Bdb::Environment
  # NOTE(review): class variable shared process-wide by design (registry of
  # environments keyed by path); a class instance variable would also work.
  @@env = {}
  def self.new(path, database = nil)
    # Only allow one environment per path.
    path = File.expand_path(path)
    @@env[path] ||= super(path)
    @@env[path].databases << database if database
    @@env[path]
  end

  def initialize(path)
    @path = path
  end
  attr_reader :path

  # Shorthand for the per-path singleton: Bdb::Environment[path].
  def self.[](path)
    new(path)
  end

  # Process-wide default configuration; merges and returns any overrides.
  # Timeouts are in microseconds, cache_size in bytes.
  def self.config(config = {})
    @config ||= {
      :max_locks => 5000,
      :lock_timeout => 30 * 1000 * 1000,
      :txn_timeout => 30 * 1000 * 1000,
      :cache_size => 1 * 1024 * 1024,
    }
    @config.merge!(config)
  end

  # Per-instance configuration, seeded from the class-level defaults.
  def config(config = {})
    @config ||= self.class.config
    @config.merge!(config)
  end

  include Replication
  def self.replicate(path, opts)
    self[path].replicate(opts)
  end

  # Databases registered against this environment (closed along with it).
  def databases
    @databases ||= []
  end

  # Lazily creates and opens the DB_ENV handle. Flag selection, replication
  # setup, and open ordering are sequence-sensitive: replication must be
  # initialized before open and started after it.
  def env
    if @env.nil?
      synchronize do
        @env = Bdb::Env.new(0)
        if disable_transactions?
          env_flags = Bdb::DB_CREATE | Bdb::DB_INIT_MPOOL
        else
          env_flags = Bdb::DB_CREATE | Bdb::DB_INIT_TXN | Bdb::DB_INIT_LOCK |
            Bdb::DB_REGISTER | Bdb::DB_RECOVER | Bdb::DB_INIT_MPOOL | Bdb::DB_THREAD

          env_flags |= Bdb::DB_INIT_REP if replicate?
        end
        @env.cachesize = config[:cache_size] if config[:cache_size]
        @env.set_timeout(config[:txn_timeout], Bdb::DB_SET_TXN_TIMEOUT) if config[:txn_timeout]
        @env.set_timeout(config[:lock_timeout], Bdb::DB_SET_LOCK_TIMEOUT) if config[:lock_timeout]
        @env.set_lk_max_locks(config[:max_locks]) if config[:max_locks]
        @env.set_lk_detect(Bdb::DB_LOCK_RANDOM)
        @env.flags_on = Bdb::DB_TXN_WRITE_NOSYNC | Bdb::DB_TIME_NOTGRANTED
        init_replication(@env) if replicate?

        @env.open(path, env_flags, 0)
        start_replication(@env) if replicate?
        # Ensure a clean shutdown; ||= so we only register the hook once.
        @exit_handler ||= at_exit { close }
      end
    end
    @env
  end

  # Close all registered databases, then the environment handle itself.
  def close
    return unless @env
    synchronize do
      databases.each {|database| database.close}
      @env.close
      @env = nil
    end
  end

  # Without a block: returns the current transaction (or nil).
  # With a block: runs it inside a new transaction — nested under the
  # current one when nested=true — committing on success and aborting on
  # any exception. @transaction is nil'd after commit so the ensure clause
  # only aborts transactions that did not commit.
  def transaction(nested = true)
    return @transaction unless block_given?
    return yield if disable_transactions?

    synchronize do
      parent = @transaction
      begin
        @transaction = env.txn_begin(nested ? parent : nil, 0)
        value = yield
        @transaction.commit(0)
        @transaction = nil
        value
      ensure
        @transaction.abort if @transaction
        @transaction = parent
      end
    end
  end

  # Flush the transaction log; :kbyte/:min gate the checkpoint,
  # :force => true forces one regardless.
  def checkpoint(opts = {})
    return if disable_transactions?
    env.txn_checkpoint(opts[:kbyte] || 0, opts[:min] || 0, opts[:force] ? Bdb::DB_FORCE : 0)
  end

  def disable_transactions?
    config[:disable_transactions]
  end

  # Reentrant critical section: the thread already holding the lock
  # (tracked via @thread_id) may re-enter without deadlocking.
  # On deadlock errors the whole section is retried, but only when no
  # transaction is open (retrying inside one would be unsafe).
  # DB_RUNRECOVERY means the environment is unusable: hard-exit.
  def synchronize
    @mutex ||= Mutex.new
    if @thread_id == thread_id
      yield
    else
      @mutex.synchronize do
        begin
          @thread_id = thread_id
          # NOTE(review): Thread.exclusive is deprecated (removed in modern
          # Ruby); @mutex already serializes access — confirm whether the
          # global stop is still required before upgrading Ruby.
          Thread.exclusive { yield }
        ensure
          @thread_id = nil
        end
      end
    end
  rescue Bdb::DbError => e
    exit!(9) if e.code == Bdb::DB_RUNRECOVERY
    retry if transaction.nil? and e.code == Bdb::DB_LOCK_DEADLOCK
    raise e
  end

  def thread_id
    Thread.current.object_id
  end
end
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
require 'bdb/base'
|
|
2
|
+
|
|
3
|
+
# Routes every operation to one of several underlying Bdb::Database
# instances, selected by the value of the :partition_by field (or an
# explicitly chosen partition via with_partition).
class Bdb::PartitionedDatabase < Bdb::Base
  SEPARATOR = '__'
  PARTITION_PATTERN = /^[-\w]*$/

  attr_reader :base_name, :partition_by, :partition

  def initialize(base_name, opts = {})
    @base_name    = base_name
    @partition_by = opts.delete(:partition_by)
    super(opts)
  end

  # Hash of partition name => open Bdb::Database handle.
  def databases
    @databases ||= {}
  end

  # Look up (opening on demand) the database for +partition+, defaulting to
  # the partition selected via with_partition. Validates the partition name
  # before using it as part of a filename.
  def database(partition = nil)
    partition ||= self.partition
    raise 'partition value required' if partition.nil?
    partition = partition.to_s
    raise "invalid partition value: #{partition}" unless partition =~ PARTITION_PATTERN

    databases[partition] ||= begin
      handle = Bdb::Database.new([partition, base_name].join(SEPARATOR), config)
      indexes.each {|field, opts| handle.index_by(field, opts)}
      handle
    end
  end

  # Partition names discovered from the database files on disk.
  def partitions
    Dir[environment.path + "/*#{SEPARATOR}#{base_name}"].map do |file|
      File.basename(file).split(SEPARATOR).first
    end
  end

  # Temporarily select +partition+ as the default for the duration of the
  # block, restoring the previous selection afterwards.
  def with_partition(partition)
    previous, @partition = @partition, partition
    yield
  ensure
    @partition = previous
  end

  def close
    databases.each_value {|database| database.close}
    @databases.clear
  end

  def get(*keys, &block)
    opts = keys.last.kind_of?(Hash) ? keys.last : {}
    database(opts[partition_by]).get(*keys, &block)
  end

  def set(key, value, opts = {})
    database(get_field(partition_by, value)).set(key, value, opts)
  end

  def delete(key, opts = {})
    database(opts[partition_by]).delete(key)
  end

  # Deletes all records in the database. Beware!
  def truncate!
    partitions.each {|partition| database(partition).truncate!}
  end
end
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
require 'socket'
|
|
2
|
+
# Mixin for Bdb::Environment providing Berkeley DB replication-manager
# configuration (master/client roles, site addresses, ack policy).
module Replication
  DEFAULT_PORT = 3463
  NUM_THREADS = 1

  # Maps friendly symbols to Bdb ack-policy constants.
  # FIX: :quorom was a misspelling of :quorum — both are accepted now so
  # existing callers keep working; prefer :quorum going forward.
  ACK_POLICY = {
    :all => Bdb::DB_REPMGR_ACKS_ALL,
    :all_peers => Bdb::DB_REPMGR_ACKS_ALL_PEERS,
    :none => Bdb::DB_REPMGR_ACKS_NONE,
    :one => Bdb::DB_REPMGR_ACKS_ONE,
    :one_peer => Bdb::DB_REPMGR_ACKS_ONE_PEER,
    :quorum => Bdb::DB_REPMGR_ACKS_QUORUM,
    :quorom => Bdb::DB_REPMGR_ACKS_QUORUM, # deprecated misspelling
  }.freeze

  def replicate?
    not @replicate.nil?
  end

  # True when replication is off entirely, or when this node is the master.
  def master?
    not replicate? or replicate[:master]
  end

  # With no args, returns the current replication options (or nil).
  # Otherwise configures replication from the given options —
  # :from (master host), :to (client host or hosts), :host/:port (this
  # node, falling back to ENV['BDB_REPLICATION_HOST']) — then opens the
  # environment so replication starts.
  def replicate(opts = nil)
    return @replicate if opts.nil?

    master = normalize_host(opts.delete(:from))
    clients = [*opts.delete(:to)].compact.collect {|h| normalize_host(h)}
    local = normalize_host(opts.delete(:host) || ENV['BDB_REPLICATION_HOST'], opts.delete(:port))
    remote = clients + [master] - [local]

    opts[:master] = (local == master)
    opts[:local] = local
    opts[:remote] = remote
    opts[:num_threads] ||= NUM_THREADS
    @replicate = opts

    env
  end

  private

  # Apply replication settings to +env+; must run before env.open.
  def init_replication(env)
    env.set_verbose(Bdb::DB_VERB_REPLICATION, true) if replicate[:verbose]
    env.rep_priority = replicate[:master] ? 1 : 0
    env.repmgr_ack_policy = ACK_POLICY[replicate[:ack_policy]] if replicate[:ack_policy]
    env.repmgr_set_local_site(*replicate[:local])
    replicate[:remote].each do |s|
      env.repmgr_add_remote_site(*s)
    end
    env.rep_nsites = replicate[:remote].size + 1
  end

  # Start the replication-manager threads; must run after env.open.
  def start_replication(env)
    env.repmgr_start(replicate[:num_threads], replicate[:master] ? Bdb::DB_REP_MASTER : Bdb::DB_REP_CLIENT)
  end

  # Resolve "host", "host:port", or (host, port) into [ip_string, port],
  # defaulting to the local hostname and DEFAULT_PORT.
  def normalize_host(*host)
    host = host.compact.join(':')
    host, port = host.split(':')
    host ||= Socket.gethostname
    port ||= DEFAULT_PORT
    port = port.to_i

    addr_info = Socket.getaddrinfo(host.strip, port)
    ip = addr_info.detect {|i| i[0] == 'AF_INET'}[3]
    [ip, port]
  end
end
|