bdb 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +20 -0
- data/README.textile +95 -0
- data/VERSION +1 -0
- data/ext/bdb.c +3025 -0
- data/ext/bdb.h +104 -0
- data/ext/extconf.rb +91 -0
- data/lib/bdb/base.rb +60 -0
- data/lib/bdb/database.rb +184 -0
- data/lib/bdb/environment.rb +119 -0
- data/lib/bdb/partitioned_database.rb +74 -0
- data/lib/bdb/result_set.rb +41 -0
- data/test/benchmark.rb +31 -0
- data/test/cursor_test.rb +150 -0
- data/test/db_test.rb +157 -0
- data/test/env_test.rb +101 -0
- data/test/simple_test.rb +93 -0
- data/test/stat_test.rb +22 -0
- data/test/test_helper.rb +7 -0
- data/test/txn_test.rb +74 -0
- metadata +82 -0
data/ext/bdb.h
ADDED
@@ -0,0 +1,104 @@
#ifndef BDB2_H
#define BDB2_H

#include <ruby.h>

#ifdef stat
#undef stat
#endif

#ifdef close
#undef close
#endif

#ifdef rename
#undef rename
#endif

#include <version.h>
#include <db.h>

#define NOTXN NULL

#ifdef OPEN_MAX
#define LMAXFD OPEN_MAX
#else
#ifdef FOPEN_MAX
#define LMAXFD FOPEN_MAX
#endif
#endif
#ifndef LMAXFD
#error "No max fd define available."
#endif

#define FNLEN 40

#define filename_copy(fp,fv) \
  strncpy(fp,RSTRING_PTR(fv),FNLEN);

#define filename_dup(fpd,fps) \
  strncpy(fpd,fps,FNLEN);

typedef struct s_envh {
  VALUE self;
  DB_ENV *env;
  VALUE adb;  /* Ruby array holding opened databases */
  VALUE atxn; /* Ruby array holding open transactions */
} t_envh;

typedef struct s_dbh {
  VALUE self;
  DB *db;
  int db_opened;
  VALUE aproc;
  VALUE sproc; /* key sorting callback */

  t_envh *env; /* Parent environment, NULL if not opened from one */
  VALUE adbc;  /* Ruby array holding opened cursor */
  char filename[FNLEN+1];
} t_dbh;

typedef struct s_dbch {
  VALUE self;
  DBC *dbc;
  t_dbh *db;
  char filename[FNLEN+1];
} t_dbch;

typedef struct s_txnh {
  VALUE self;
  DB_TXN *txn;
  t_envh *env;
} t_txnh;

#define cu(b,m) \
  rb_define_const(b,#m,UINT2NUM(m))

#define ci(b,m) \
  rb_define_const(b,#m,INT2NUM(m))

#define cs(b,m) \
  rb_define_const(b,#m,rb_str_new2(m))

#define simple_set(fname) \
  VALUE db_ ## fname ## _eq(VALUE obj, VALUE v) \
  { \
    rb_ivar_set(obj,fv_ ## fname,v); \
    return obj; \
  }

#define attr_writer(fname) \
  VALUE fname ## _writer(VALUE obj, VALUE v) \
  { \
    rb_ivar_set(obj,fv_ ## fname,v); \
    return obj; \
  }

#define attr_reader(fname) \
  VALUE fname ## _reader(VALUE obj) \
  { \
    return rb_ivar_get(obj,fv_ ## fname); \
  }

#endif
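The cu/ci/cs macros above register db.h #defines as constants on the extension's module (the generated bdb_aux._c in extconf.rb below emits one such call per define). A minimal sketch of how those constants surface on the Ruby side; DB_VERSION_STRING is assumed here as a typical string define and is not confirmed by this diff:

    require 'bdb'

    # Numeric defines arrive via cu()/ci() as Integer constants and are
    # combined exactly as the C API combines them.
    flags = Bdb::DB_CREATE | Bdb::DB_INIT_MPOOL

    # String defines arrive via cs(); guarded because the exact set of
    # constants depends on the db.h the extension was built against.
    puts Bdb::DB_VERSION_STRING if defined?(Bdb::DB_VERSION_STRING)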
data/ext/extconf.rb
ADDED
@@ -0,0 +1,91 @@
#!/usr/bin/env ruby
require 'mkmf'

inc, lib = dir_config('db')

# OS X compatibility
if(PLATFORM =~ /darwin/) then
  # test if Bdb is probably universal

  filetype = (IO.popen("file #{inc}/../db_dump").readline.chomp rescue nil)
  # if it's not universal, ARCHFLAGS should be set
  if((filetype !~ /universal binary/) && ENV['ARCHFLAGS'].nil?) then
    arch = (IO.popen("uname -m").readline.chomp rescue nil)
    $stderr.write %{
      =========== WARNING ===========

      You are building this extension on OS X without setting the
      ARCHFLAGS environment variable, and BerkeleyDB does not appear
      to have been built as a universal binary. If you are seeing this
      message, that means that the build will probably fail.

      Try setting the environment variable ARCHFLAGS
      to '-arch #{arch}' before building.

      For example:
      (in bash) $ export ARCHFLAGS='-arch #{arch}'
      (in tcsh) % setenv ARCHFLAGS '-arch #{arch}'

      Then try building again.

      ===================================

    }
    # We don't exit here. Who knows? It might build.
  end
end

versions=%w(db-4.8 db-4.7 db-4.6 db-4.5 db-4.4 db-4.3 db-4.2)
until versions.empty?
  (lib_ok = have_library(versions.shift,'db_version', 'db.h')) && break
end

def create_header
  if File.exist?("bdb_aux._c")
    message("Not writing bdb_aux._c (defines), already exists\n")
    return
  end

  message("Writing bdb_aux._c (defines), this takes a while\n")
  db_header = $CPPFLAGS.split.select { |f| f =~ /^-I/ }.map { |e|
    f = File.join(e[2..-1], 'db.h')
    File.exists?(f) ? f : nil
  }.select { |e| e }.first

  n=0
  defines=[]
  File.open(db_header) {|fd|
    File.open("bdb_aux._c","w") {|hd|
      hd.puts("/* This file automatically generated by extconf.rb */\n")
      fd.each_line {|l|
        if l =~ %r{^#define\s+(DBC?_\w*)\s+([^\/]*)\s*(.*?)(\/\*.*)?$}
          name = $1
          value = $2
          if macro_defined?(name,"#include <db.h>")
            case value
            when /^"/
              hd.print(%Q{cs(mBdb,%s);\n}%[name])
            when /^\(?(0x|\d)/
              hd.print(%Q{cu(mBdb,%s);\n}%[name])
            when /^\(?-/
              hd.print(%Q{ci(mBdb,%s);\n}%[name])
            else
              $stderr.puts "don't know how to handle #{name} #{value.strip}, guessing UINT"
              hd.print(%Q{cu(mBdb,%s);\n}%[name])
            end
            n+=1
          end
        end
      }
    }
    message("\nwrote #{n} defines\n")
  }
end

if lib_ok
  create_header
  create_makefile('bdb')
else
  $stderr.puts("cannot create Makefile")
  exit 1
end
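To illustrate how create_header classifies db.h defines into cs/cu/ci calls, here is a standalone sketch applying the same regex and case analysis to a few sample lines. The sample #define lines are assumptions for illustration, not copied from any particular db.h:

    samples = [
      '#define DB_CREATE          0x00000001',
      '#define DB_VERSION_STRING  "Berkeley DB"',
      '#define DB_LOCK_DEADLOCK   (-30995)',
    ]

    samples.each do |l|
      next unless l =~ %r{^#define\s+(DBC?_\w*)\s+([^\/]*)\s*(.*?)(\/\*.*)?$}
      name, value = $1, $2
      macro = case value
              when /^"/          then 'cs'  # string constant
              when /^\(?(0x|\d)/ then 'cu'  # unsigned numeric constant
              when /^\(?-/       then 'ci'  # signed (negative) constant
              else                    'cu'  # fallback: extconf.rb warns and guesses UINT
              end
      puts "#{macro}(mBdb,#{name});"
    end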
data/lib/bdb/base.rb
ADDED
@@ -0,0 +1,60 @@
require 'bdb'
require 'tuple'
require 'bdb/environment'
require 'bdb/result_set'

class Bdb::Base
  def initialize(opts)
    @config = Bdb::Environment.config.merge(opts)
    @indexes = {}
  end
  attr_reader :indexes

  def config(config = {})
    @config.merge!(config)
  end

  def index_by(field, opts = {})
    raise "index on #{field} already exists" if indexes[field]
    indexes[field] = opts
  end

  def environment
    @environment ||= Bdb::Environment.new(config[:path], self)
  end

  def transaction(nested = true, &block)
    environment.transaction(nested, &block)
  end

  def synchronize(&block)
    environment.synchronize(&block)
  end

  def checkpoint(opts = {})
    environment.synchronize(opts)
  end

  private

  def get_field(field, value)
    value.kind_of?(Hash) ? value[field] : value.send(field)
  end
end

class Object
  attr_accessor :bdb_locator_key
end

# Array comparison should try Tuple comparison first.
class Array
  cmp = instance_method(:<=>)

  define_method(:<=>) do |other|
    begin
      Tuple.dump(self) <=> Tuple.dump(other)
    rescue TypeError => e
      cmp.bind(self).call(other)
    end
  end
end
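Bdb::Base is the shared configuration and indexing layer that Bdb::Database (below) and Bdb::PartitionedDatabase build on. A hedged sketch of the intended calling pattern; the subclass name, :path value, and page size are assumptions for illustration:

    class UserStore < Bdb::Base   # hypothetical subclass
    end

    store = UserStore.new(:path => '/tmp/bdb_example')  # assumed path
    store.index_by(:email)              # declare a secondary index; raises if declared twice
    store.config(:page_size => 4096)    # merged into the per-instance config
    store.transaction do
      # runs inside a transaction on the shared Bdb::Environment for :path
    end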
data/lib/bdb/database.rb
ADDED
@@ -0,0 +1,184 @@
require 'bdb/base'

class Bdb::Database < Bdb::Base
  def initialize(name, opts = {})
    @name = name
    super(opts)
  end
  attr_reader :name

  def db(index = nil)
    if @db.nil?
      @db = {}
      transaction(false) do
        primary_db = environment.env.db
        primary_db.pagesize = config[:page_size] if config[:page_size]
        primary_db.open(transaction, name, nil, Bdb::Db::BTREE, Bdb::DB_CREATE, 0)
        @db[:primary_key] = primary_db

        indexes.each do |field, opts|
          index_callback = lambda do |db, key, data|
            value = Marshal.load(data)
            index_key = value.kind_of?(Hash) ? value[:field] : value.send(field)
            if opts[:multi_key] and index_key.kind_of?(Array)
              # Index multiple keys. If the key is an array, you must wrap it with an outer array.
              index_key.collect {|k| Tuple.dump(k)}
            elsif index_key
              # Index a single key.
              Tuple.dump(index_key)
            end
          end
          index_db = environment.env.db
          index_db.flags = Bdb::DB_DUPSORT unless opts[:unique]
          index_db.pagesize = config[:page_size] if config[:page_size]
          index_db.open(transaction, "#{name}_by_#{field}", nil, Bdb::Db::BTREE, Bdb::DB_CREATE, 0)
          primary_db.associate(transaction, index_db, Bdb::DB_CREATE, index_callback)
          @db[field] = index_db
        end
      end
    end
    @db[index || :primary_key]
  end

  def close
    return unless @db
    synchronize do
      @db.each {|field, db| db.close(0)}
      @db = nil
    end
  end

  def count(field, key)
    with_cursor(db(field)) do |cursor|
      k, v = cursor.get(Tuple.dump(key), nil, Bdb::DB_SET)
      k ? cursor.count : 0
    end
  end

  def get(*keys, &block)
    opts = keys.last.kind_of?(Hash) ? keys.pop : {}
    db = db(opts[:field])
    set = Bdb::ResultSet.new(opts, &block)
    flags = opts[:modify] ? Bdb::DB_RMW : 0
    flags = 0 if environment.disable_transactions?

    keys.each do |key|
      key = get_key(key, opts)
      if key == :all
        with_cursor(db) do |cursor|
          if opts[:reverse]
            k,v = cursor.get(nil, nil, Bdb::DB_LAST | flags)            # Start at the last item.
            iter = lambda {cursor.get(nil, nil, Bdb::DB_PREV | flags)}  # Move backward.
          else
            k,v = cursor.get(nil, nil, Bdb::DB_FIRST | flags)           # Start at the first item.
            iter = lambda {cursor.get(nil, nil, Bdb::DB_NEXT | flags)}  # Move forward.
          end

          while k
            set << unmarshal(v, :tuple => k)
            k,v = iter.call
          end
        end
      elsif key.kind_of?(Range)
        # Fetch a range of keys.
        with_cursor(db) do |cursor|
          first = Tuple.dump(key.first)
          last = Tuple.dump(key.last)

          # Return false once we pass the end of the range.
          cond = key.exclude_end? ? lambda {|k| k < last} : lambda {|k| k <= last}
          if opts[:reverse]
            iter = lambda {cursor.get(nil, nil, Bdb::DB_PREV | flags)}  # Move backward.

            # Position the cursor at the end of the range.
            k,v = cursor.get(last, nil, Bdb::DB_SET_RANGE | flags) || cursor.get(nil, nil, Bdb::DB_LAST | flags)
            while k and not cond.call(k)
              k,v = iter.call
            end

            cond = lambda {|k| k >= first} # Change the condition to stop when we move past the start.
          else
            k,v = cursor.get(first, nil, Bdb::DB_SET_RANGE | flags)     # Start at the beginning of the range.
            iter = lambda {cursor.get(nil, nil, Bdb::DB_NEXT | flags)}  # Move forward.
          end

          while k and cond.call(k)
            set << unmarshal(v, :tuple => k)
            k,v = iter.call
          end
        end
      else
        if (db.flags & Bdb::DB_DUPSORT) == 0
          synchronize do
            # There can only be one item for each key.
            data = db.get(transaction, Tuple.dump(key), nil, flags)
            set << unmarshal(data, :key => key) if data
          end
        else
          # Have to use a cursor because there may be multiple items with each key.
          with_cursor(db) do |cursor|
            k,v = cursor.get(Tuple.dump(key), nil, Bdb::DB_SET | flags)
            while k
              set << unmarshal(v, :tuple => k)
              k,v = cursor.get(nil, nil, Bdb::DB_NEXT_DUP | flags)
            end
          end
        end
      end
    end
    set.results
  rescue Bdb::ResultSet::LimitReached
    set.results
  end

  def set(key, value, opts = {})
    synchronize do
      key = Tuple.dump(key)
      value = Marshal.dump(value)
      flags = opts[:create] ? Bdb::DB_NOOVERWRITE : 0
      db.put(transaction, key, value, flags)
    end
  end

  def delete(key)
    synchronize do
      key = Tuple.dump(key)
      db.del(transaction, key, 0)
    end
  end

  # Deletes all records in the database. Beware!
  def truncate!
    synchronize do
      db.truncate(transaction)
    end
  end

  private

  def get_key(key, opts)
    if opts[:partial] and not key.kind_of?(Range) and not key == :all
      first = [*key]
      last = first + [true]
      key = first..last
    end
    key
  end

  def unmarshal(value, opts = {})
    value = Marshal.load(value)
    value.bdb_locator_key = opts[:tuple] ? Tuple.load(opts[:tuple]) : [*opts[:key]]
    value
  end

  def with_cursor(db)
    synchronize do
      begin
        cursor = db.cursor(transaction, 0)
        yield(cursor)
      ensure
        cursor.close if cursor
      end
    end
  end
end
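A hedged usage sketch of the Bdb::Database API shown above; the :path value and record contents are assumptions for illustration, not examples shipped with the gem:

    users = Bdb::Database.new('users', :path => '/tmp/bdb_example')  # assumed path

    users.set(1, {:name => 'alice'})  # keys go through Tuple.dump, values through Marshal.dump
    users.set(2, {:name => 'bob'})

    users.get(1)      # single-key lookup; returns an array of results
    users.get(1..2)   # range scan via a cursor and DB_SET_RANGE
    users.get(:all)   # full scan with DB_FIRST/DB_NEXT (DB_LAST/DB_PREV with :reverse => true)
    users.delete(2)
    users.close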
data/lib/bdb/environment.rb
ADDED
@@ -0,0 +1,119 @@
class Bdb::Environment
  @@env = {}
  def self.new(path, database = nil)
    path = File.expand_path(path)
    @@env[path] ||= super(path)
    @@env[path].databases << database if database
    @@env[path]
  end

  def self.config(config = {})
    @config ||= {
      :max_locks    => 5000,
      :lock_timeout => 30 * 1000 * 1000,
      :txn_timeout  => 30 * 1000 * 1000,
      :cache_size   => 1 * 1024 * 1024,
    }
    @config.merge!(config)
  end

  def config(config = {})
    @config ||= self.class.config
    @config.merge!(config)
  end

  def initialize(path)
    @path = path
  end
  attr_reader :path

  def databases
    @databases ||= []
  end

  def env
    if @env.nil?
      synchronize do
        @env = Bdb::Env.new(0)
        if disable_transactions?
          env_flags = Bdb::DB_CREATE | Bdb::DB_INIT_MPOOL
        else
          env_flags = Bdb::DB_CREATE | Bdb::DB_INIT_TXN | Bdb::DB_INIT_LOCK |
                      Bdb::DB_REGISTER | Bdb::DB_RECOVER | Bdb::DB_INIT_MPOOL | Bdb::DB_THREAD
        end

        @env.cachesize = config[:cache_size] if config[:cache_size]
        @env.set_timeout(config[:txn_timeout], Bdb::DB_SET_TXN_TIMEOUT) if config[:txn_timeout]
        @env.set_timeout(config[:lock_timeout], Bdb::DB_SET_LOCK_TIMEOUT) if config[:lock_timeout]
        @env.set_lk_max_locks(config[:max_locks]) if config[:max_locks]
        @env.set_lk_detect(Bdb::DB_LOCK_RANDOM)
        @env.flags_on = Bdb::DB_TXN_WRITE_NOSYNC | Bdb::DB_TIME_NOTGRANTED
        @env.open(path, env_flags, 0)

        @exit_handler ||= at_exit { close }
      end
    end
    @env
  end

  def close
    return unless @env
    synchronize do
      databases.each {|database| database.close}
      @env.close
      @env = nil
    end
  end

  def transaction(nested = true)
    return @transaction unless block_given?
    return yield if disable_transactions?

    synchronize do
      parent = @transaction
      begin
        @transaction = env.txn_begin(nested ? parent : nil, 0)
        value = yield
        @transaction.commit(0)
        @transaction = nil
        value
      ensure
        @transaction.abort if @transaction
        @transaction = parent
      end
    end
  end

  def checkpoint(opts = {})
    return if disable_transactions?
    env.txn_checkpoint(opts[:kbyte] || 0, opts[:min] || 0, opts[:force] ? Bdb::DB_FORCE : 0)
  end

  def disable_transactions?
    config[:disable_transactions]
  end

  def synchronize
    @mutex ||= Mutex.new
    if @thread_id == thread_id
      yield
    else
      @mutex.synchronize do
        begin
          @thread_id = thread_id
          Thread.exclusive { yield }
        ensure
          @thread_id = nil
        end
      end
    end
  rescue Bdb::DbError => e
    exit!(9) if e.code == Bdb::DB_RUNRECOVERY
    retry if transaction.nil? and e.code == Bdb::DB_LOCK_DEADLOCK
    raise e
  end

  def thread_id
    Thread.current.object_id
  end
end
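Environments are memoized per expanded path, and synchronize retries a deadlocked block only when no transaction is open. A hedged sketch of the transaction contract as it reads from the file above; the path is an assumption for illustration:

    env = Bdb::Environment.new('/tmp/bdb_example')  # assumed path; one instance per expanded path

    env.transaction do
      # outer transaction begins here; committed if the block returns, aborted on raise
      env.transaction(true) do
        # nested = true: this child txn has the outer txn as its parent
      end
      env.transaction(false) do
        # nested = false: an independent top-level transaction
      end
    end

    # Called without a block, transaction just returns the currently open handle.
    env.transaction  # => nil outside any block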