archipelago 0.2.5 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,281 @@
+ # Archipelago - a distributed computing toolkit for ruby
+ # Copyright (C) 2006 Martin Kihlgren <zond at troja dot ath dot cx>
+ #
+ # This program is free software; you can redistribute it and/or
+ # modify it under the terms of the GNU General Public License
+ # as published by the Free Software Foundation; either version 2
+ # of the License, or (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ require 'rubygems'
+ require 'oneliner'
+ require 'archipelago/disco'
+ require 'archipelago/hashish'
+ require 'archipelago/sanitation'
+ require 'pathname'
+ require 'bdb'
+ require 'drb'
+ require 'set'
+ require 'pp'
+
+ module Archipelago
+
+   module Dump
+
+     #
+     # The server class in the Archipelago::Dump network.
+     #
+     # Uses an Archipelago::Sanitation::Officer to keep track of what
+     # needs to be done for redundancy, but Site does the actual work.
+     #
+     class Site
+
+       #
+       # The minimum pause between checks that our keys still belong with us.
+       #
+       CHECK_INTERVAL = 30
+
+       #
+       # The Site can be published.
+       #
+       include Archipelago::Disco::Publishable
+
+       attr_accessor :db, :debug_callable, :officer, :persistence_provider
+
+       def initialize(options = {})
+         initialize_publishable(options)
+         #
+         # The callable object that will get sent our debug messages, if it exists.
+         #
+         @debug_callable = options[:debug_callable]
+
+         #
+         # The provider of checksumming magic and chunk distribution.
+         #
+         @officer = options[:officer] || (defined?(Archipelago::Sanitation::CLEANER) ? Archipelago::Sanitation::CLEANER : Archipelago::Sanitation::Officer.new)
+
+         #
+         # The database where the data lives.
+         #
+         @db = @persistence_provider.get_dup_tree("db")
+
+         #
+         # The minimum pause between checks that our keys still belong with us.
+         #
+         @check_interval = options[:check_interval] || CHECK_INTERVAL
+       end
+
+       #
+       # Will insert the array +values+ under +key+ in the db.
+       #
+       # Will insert them with the given +timestamp+ (defaulting to "\000\000\000\000").
+       #
+       # If values with the same timestamp already exist, they will be
+       # overwritten if they outnumber the given +values+. If they
+       # are fewer, enough of the given +values+ will be inserted to
+       # get the same number of values in the database as in
+       # +values+.
+       #
+       # Any values with a different timestamp will be deleted.
+       #
+       def insert!(key, values, timestamp = "\000\000\000\000")
+         if (duplicates = @db.duplicates(key)).empty?
+           values.each do |value|
+             @db[key] = timestamp + value
+           end
+         else
+           my_timestamp = duplicates.first[0...4]
+           if timestamp != my_timestamp || duplicates.size > values.size
+             @db.env.begin(BDB::TXN_COMMIT, @db) do |txn, db|
+               db.delete(key)
+               values.each do |value|
+                 db[key] = timestamp + value
+               end
+             end
+           else
+             values[0...(values.size - duplicates.size)].each do |value|
+               @db[key] = timestamp + value
+             end
+           end
+         end
+       end
+
+       #
+       # Fetches all duplicates of +key+.
+       #
+       # Returns [[TIMESTAMP0, VALUE0],...,[TIMESTAMPn, VALUEn]]
+       #
+       def fetch(key)
+         values = @db.duplicates(key).collect do |value|
+           [value[0...4], value[4..-1]]
+         end
+         return values
+       end
+
+       #
+       # Deletes +key+ from the db.
+       #
+       def delete!(key)
+         return @db.delete(key)
+       end
+
+       private
+
+       #
+       # After we have been published we will subscribe to other Archipelago::Dump::Site
+       # changes and start a thread that checks our edge keys.
+       #
+       def around_publish(&publish_block)
+         yield
+         @jockey.subscribe(:lost, @officer.service_descriptions[:sites], service_id) do |record|
+           lost_peer(record)
+         end
+         start_edge_check
+       end
+
+       #
+       # Before we stop we will unsubscribe from other Archipelago::Dump::Site
+       # changes and kill our @edge_check_thread.
+       #
+       def around_unpublish(&block)
+         @jockey.unsubscribe(:lost, @officer.service_descriptions[:sites], service_id)
+         @edge_check_thread.kill
+         yield
+       end
+
+       #
+       # Will ask our Archipelago::Sanitation::Officer to
+       # redistribute +key+.
+       #
+       # If that succeeds, +key+ will be deleted from our database.
+       #
+       def redistribute_and_delete(key)
+         begin
+           @debug_callable.call("#{service_id}.redistribute_and_delete(#{key}) gonna redist and delete") if @debug_callable
+           @officer.redistribute(key)
+           @db.delete(key)
+           @debug_callable.call("#{service_id}.redistribute_and_delete(#{key}) done redistributing") if @debug_callable
+         rescue Exception => e
+           @debug_callable.call("#{service_id}.redistribute_and_delete(#{key}) got #{e}: #{PP.pp(e.backtrace, "")}") if @debug_callable
+           # What shall we do in this case?
+           # * Nothing, and wait for the admins to make a manual check?
+           # * Send an email someplace?
+           # * Store it somewhere so that the manual check goes faster?
+         end
+       end
+
+       #
+       # Asks our Archipelago::Sanitation::Officer if +key+
+       # should reside with us.
+       #
+       def belongs_here?(key)
+         @officer.belongs_at?(self.service_id, key)
+       end
+
+       #
+       # Starts a Thread that checks our first and last keys.
+       #
+       def start_edge_check
+         @edge_check_thread = Thread.new do
+           loop do
+             begin
+               @debug_callable.call("#{service_id}.start_edge_check doing its thang") if @debug_callable
+               keys_to_check = Set.new
+               @db.reverse_each_key do |key|
+                 break if belongs_here?(key)
+                 keys_to_check << key
+               end
+               @db.each_key do |key|
+                 break if belongs_here?(key)
+                 keys_to_check << key
+               end
+               @officer.update_services!(:validate => true) unless keys_to_check.empty?
+               keys_to_check.each do |key|
+                 redistribute_and_delete(key)
+               end
+               sleep(@check_interval)
+             rescue Exception => e
+               @debug_callable.call("#{service_id}.start_edge_check() got #{e}: #{PP.pp(e.backtrace, "")}") if @debug_callable
+             end
+           end
+         end
+       end
+
+       #
+       # Returns whether we are right after +other_service_id+
+       # in the big scheme of things.
+       #
+       def right_after?(other_service_id)
+         @officer.next_to?(other_service_id, service_id)
+       end
+
+       #
+       # Returns whether we are right before +other_service_id+
+       # in the big scheme of things.
+       #
+       def right_before?(other_service_id)
+         @officer.next_to?(service_id, other_service_id)
+       end
+
+       #
+       # Tell the officer to redistribute +key+ and ignore any errors.
+       #
+       def redistribute(key)
+         begin
+           @officer.redistribute(key)
+         rescue Exception => e
+           @debug_callable.call("#{service_id}.redistribute(#{key}) got #{e}: #{PP.pp(e.backtrace, "")}") if @debug_callable
+           # What shall we do in this case?
+           # * Nothing, and wait for the admins to make a manual check?
+           # * Send an email someplace?
+           # * Store it somewhere so that the manual check goes faster?
+         end
+       end
+
+       #
+       # When we have lost a peer +record+ we must check if it was a neighbour of ours.
+       #
+       # If it was, then we must redistribute its keys.
+       #
+       # If it was our forward neighbour we will redistribute everything we
+       # hold that we know it also held - everything from and including
+       # our second-to-first master up to and including ourselves.
+       #
+       # If it was our backward neighbour we will redistribute
+       # everything that it was master of.
+       #
+       def lost_peer(record)
+         @debug_callable.call("#{service_id}.lost_peer(#{record[:service_id]}) called") if @debug_callable
+         keys_to_redistribute = Set.new
+         if right_before?(record[:service_id])
+           @debug_callable.call("#{service_id}.lost_peer(#{record[:service_id]}) is right_before, redistributing from #{@officer.second_master_to(service_id)}") if @debug_callable
+           @db.each_key(@officer.second_master_to(service_id)) do |key|
+             keys_to_redistribute << key
+           end
+         end
+         if right_after?(record[:service_id])
+           @debug_callable.call("#{service_id}.lost_peer(#{record[:service_id]}) is right_after, redistributing up to #{record[:service_id]}") if @debug_callable
+           @db.each_key(@officer.predecessor(record[:service_id])) do |key|
+             keys_to_redistribute << key
+             break if key > record[:service_id]
+           end
+         end
+         @officer.update_services!(:validate => true)
+         keys_to_redistribute.each do |key|
+           redistribute(key)
+         end
+       end
+
+     end
+
+   end
+
+ end
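
To make the timestamped-duplicate semantics of insert! concrete, here is a hedged usage sketch (not from the package; it assumes initialize_publishable accepts a :persistence_provider option that supplies the get_dup_tree backer):

    site = Archipelago::Dump::Site.new(:persistence_provider => provider) # provider is hypothetical

    # Two replicas under "k", tagged with a 4-byte timestamp prefix.
    site.insert!("k", ["v1", "v2"], "\000\000\000\001")
    site.fetch("k")  #=> [["\000\000\000\001", "v1"], ["\000\000\000\001", "v2"]]

    # A different timestamp deletes the old duplicates inside one transaction.
    site.insert!("k", ["v3"], "\000\000\000\002")
    site.fetch("k")  #=> [["\000\000\000\002", "v3"]]

    site.delete!("k")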
@@ -17,6 +17,7 @@
 
  require 'archipelago/current'
  require 'bdb'
+ require 'rbtree'
 
  module Archipelago
 
@@ -26,46 +27,88 @@ module Archipelago
   module Hashish
 
     #
-     # In essence a Berkeley Database backed Hash.
+     # In essence a persistence backed Hash (with certain BTree behaviours).
     #
-     # Will cache all values having been written or read
-     # in a normal Hash cache for fast access.
+     # Will cache everything including timestamps for last
+     # modification in normal Hashes, but keep everything stored
+     # in a persistence backer defined by the subclass.
     #
-     # Will save the last update timestamp for all keys
-     # in a separate Hash cache AND a separate Berkeley Database.
-     #
-     class BerkeleyHashish
+     module CachedHashish
       include Archipelago::Current::Synchronized
       #
-       # Initialize an instance with the +name+ and BDB::Env +env+.
+       # Returns true if the value stored under +serialized_key+ differs from +serialized_value+.
       #
-       def initialize(name, env)
-         super()
-         @content_db = env.open_db(BDB::HASH, name, "content", BDB::CREATE)
-         @content = {}
-         @timestamps_db = env.open_db(BDB::HASH, name, "timestamps", BDB::CREATE | BDB::NOMMAP)
-         @timestamps = {}
-         @lock = Archipelago::Current::Lock.new
+       def changed?(serialized_key, serialized_value)
+         raise "You have to implement me!"
+       end
+       #
+       # Get the serialized timestamp for +serialized_key+ from the persistent backer.
+       #
+       def do_get_timestamp_from_db(serialized_key)
+         raise "You have to implement me!"
+       end
+       #
+       # Actually writes +key+ serialized as +serialized_key+ and
+       # +serialized_value+ to the db, and +timestamp+ to the
+       # timestamp db.
+       #
+       # Used by <b>write_to_db</b>.
+       #
+       def do_write_to_db(key, serialized_key, serialized_value, timestamp)
+         raise "You have to implement me!"
       end
       #
-       # Close the @content_db and @timestamps_db behind this BerkeleyHashish
+       # Close the persistent backers behind this CachedHashish.
       #
       def close!
-         @content_db.close
-         @timestamps_db.close
+         raise "You have to implement me!"
       end
       #
-       # Returns a deep ( Marshal.load(Marshal.dump(o)) ) clone
-       # of the object represented by +key+.
+       # Returns whether the persistent backer contains +key+.
       #
-       def get_deep_clone(key)
-         return Marshal.load(@content_db[Marshal.dump(key)])
+       def db_include?(key)
+         raise "You have to implement me!"
       end
       #
-       # Returns true if this BerkeleyHashish include +key+.
+       # Will perform an actual fetching of +serialized_key+ from the persistent backer.
       #
-       def include?(key)
-         @content.include?(key) || !@content_db[Marshal.dump(key)].nil?
+       def do_get_from_db(serialized_key)
+         raise "You have to implement me!"
+       end
+       #
+       # Actually deletes +serialized_key+ from the persistent backer.
+       #
+       def do_delete_from_persistence(serialized_key)
+         raise "You have to implement me!"
+       end
+       #
+       # Will do +callable+.call(key, value) for each
+       # key-and-value pair in this Hashish.
+       #
+       # NB: This is totally thread-unsafe, only do this
+       # for management or rescue!
+       #
+       def each(callable)
+         raise "You have to implement me!"
+       end
+       #
+       # Get the first key/value pair from our cache tree.
+       #
+       def first
+         @content.first
+       end
+       #
+       # Get the last key/value pair from our cache tree.
+       #
+       def last
+         @content.last
+       end
+       #
+       # Forget the given +key+ in the cache tree.
+       #
+       def forget(key)
+         @content.delete(key)
+         @timestamps.delete(key)
       end
       #
       # Simply get the value for the +key+.
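
CachedHashish is a template: the hunks below keep the caching, locking and load/save-hook logic in the module and delegate raw storage to the hooks declared above. For orientation, here is a hedged, in-memory sketch of a minimal backer (MemoryHashish is hypothetical; initialize_cached_hashish and the hook signatures come from the hunks further down):

    class MemoryHashish
      include Archipelago::Hashish::CachedHashish
      def initialize
        super()
        initialize_cached_hashish
        @backer = {}            # serialized_key => serialized_value
        @backer_timestamps = {} # serialized_key => serialized Time
      end
      def changed?(serialized_key, serialized_value)
        old = @backer[serialized_key]
        old && old != serialized_value
      end
      def do_get_timestamp_from_db(serialized_key)
        @backer_timestamps[serialized_key]
      end
      def do_write_to_db(key, serialized_key, serialized_value, timestamp)
        @backer[serialized_key] = serialized_value
        @backer_timestamps[serialized_key] = Marshal.dump(timestamp)
      end
      def do_get_from_db(serialized_key)
        @backer[serialized_key]
      end
      def do_delete_from_persistence(serialized_key)
        @backer.delete(serialized_key)
        @backer_timestamps.delete(serialized_key)
      end
      def db_include?(key)
        @backer.include?(Marshal.dump(key))
      end
      def close!
      end
      def each(callable)
        @backer.each_key { |sk| callable.call(Marshal.load(sk), self[Marshal.load(sk)]) }
      end
    end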
@@ -76,7 +119,7 @@ module Archipelago
       # if the value didn't exist in the live hash yet.
       #
       def [](key)
-         @lock.synchronize_on(key) do
+         synchronize_on(key) do
 
           value = @content[key]
           return value if value
@@ -85,6 +128,14 @@ module Archipelago
 
         end
       end
+       def without_lock(&block)
+         @unlocked_thread = Thread.current
+         begin
+           yield
+         ensure
+           @unlocked_thread = nil
+         end
+       end
       #
       # Insert +value+ under +key+.
       #
@@ -93,7 +144,7 @@ module Archipelago
       # <b>value.respond_to?(:save_hook)</b>.
       #
       def []=(key, value)
-         @lock.synchronize_on(key) do
+         synchronize_on(key, @unlocked_thread != Thread.current) do
 
           @content[key] = value
 
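
A note on the second argument now passed to synchronize_on: it appears to toggle whether the per-key lock is really taken. without_lock (added in the previous hunk) registers the calling thread as @unlocked_thread, so writes and deletes issued from that thread skip locking, presumably so that save_hook callbacks can write back into the same hashish without deadlocking. A hedged sketch:

    hashish.without_lock do
      hashish[:a] = 1     # synchronize_on(:a, false) - no lock taken for this thread
      hashish.delete(:b)  # likewise
    end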
@@ -114,7 +165,7 @@ module Archipelago
       # a save is actually performed.
       #
       def store_if_changed(key)
-         @lock.synchronize_on(key) do
+         synchronize_on(key) do
 
           value = @content[key]
 
@@ -131,64 +182,88 @@ module Archipelago
 
             serialized_value = Marshal.dump(value)
             serialized_key = Marshal.dump(key)
-             old_serialized_value = @content_db[serialized_key]
-
-             write_to_db(key, serialized_key, serialized_value, value) if old_serialized_value && old_serialized_value != serialized_value
+             write_to_db(key, serialized_key, serialized_value, value) if changed?(serialized_key, serialized_value)
 
           end
 
         end
       end
       #
-       # Returns the last time the value under +key+ was changed.
+       # Returns a deep ( Marshal.load(Marshal.dump(o)) ) clone
+       # of the object represented by +key+.
       #
-       def timestamp(key)
-         @lock.synchronize_on(key) do
-
-           timestamp = @timestamps[key]
-           return timestamp if timestamp
-
-           serialized_key = Marshal.dump(key)
-           serialized_timestamp = @timestamps_db[serialized_key]
-           return nil unless serialized_timestamp
-
-           timestamp = Marshal.load(serialized_timestamp)
-           @timestamps[key] = timestamp
-           return timestamp
-
-         end
+       def get_deep_clone(key)
+         return Marshal.load(Marshal.dump(@content[key]))
       end
       #
-       # Will do +callable+.call(key, value) for each
-       # key-and-value pair in this Hashish.
-       #
-       # NB: This is totaly thread-unsafe, only do this
-       # for management or rescue!
+       # Initializes the cache hashes for this CachedHashish.
       #
-       def each(callable)
-         @content_db.each do |serialized_key, serialized_value|
-           key = Marshal.load(serialized_key)
-           callable.call(key, self.[](key))
-         end
+       def initialize_cached_hashish
+         @content = RBTree.new
+         @timestamps = {}
+         @unlocked_thread = nil
       end
       #
       # Delete +key+ and its value and timestamp.
       #
       def delete(key)
-         @lock.synchronize_on(key) do
+         synchronize_on(key, @unlocked_thread != Thread.current) do
 
           serialized_key = Marshal.dump(key)
 
           @content.delete(key)
-           @content_db[serialized_key] = nil
           @timestamps.delete(key)
-           @timestamps_db[serialized_key] = nil
-
+           do_delete_from_persistence(serialized_key)
         end
       end
+       #
+       # Read +key+ from db and if it is found
+       # put it in the cache Hash.
+       #
+       # Will call <b>value.load_hook</b> and send it
+       # a block that does the actual insertion of the value
+       # into the live hash if <b>value.respond_to?(:load_hook)</b>.
+       #
+       def get_from_db(key)
+         serialized_key = Marshal.dump(key)
+         serialized_value = do_get_from_db(serialized_key)
+         return nil unless serialized_value
+
+         value = Marshal.load(serialized_value)
+         if value.respond_to?(:load_hook)
+           value.load_hook do
+             @content[key] = value
+           end
+         else
+           @content[key] = value
+         end
+         return value
+       end
+       #
+       # Returns true if this CachedHashish includes +key+.
+       #
+       def include?(key)
+         @content.include?(key) || db_include?(key)
+       end
+       #
+       # Returns the last time the value under +key+ was changed.
+       #
+       def timestamp(key)
+         synchronize_on(key) do
+
+           timestamp = @timestamps[key]
+           return timestamp if timestamp
+
+           serialized_key = Marshal.dump(key)
+           serialized_timestamp = do_get_timestamp_from_db(serialized_key)
+           return nil unless serialized_timestamp
+
+           timestamp = Marshal.load(serialized_timestamp)
+           @timestamps[key] = timestamp
+           return timestamp
 
-       private
-
+         end
+       end
       #
       # Write +key+, serialized as +serialized_key+ and
       # +serialized_value+ to the db.
@@ -199,50 +274,113 @@ module Archipelago
       #
       def write_to_db(key, serialized_key, serialized_value, value)
         if value.respond_to?(:save_hook)
-           old_serialized_value = @content_db[serialized_key]
+           old_serialized_value = do_get_from_db(serialized_key)
           old_value = old_serialized_value ? Marshal.load(old_serialized_value) : nil
           value.save_hook(old_value) do
-             do_write_to_db(key, serialized_key, serialized_value)
+             now = Time.now
+             do_write_to_db(key, serialized_key, serialized_value, now)
+             @timestamps[key] = now
           end
         else
-           do_write_to_db(key, serialized_key, serialized_value)
+           now = Time.now
+           do_write_to_db(key, serialized_key, serialized_value, now)
+           @timestamps[key] = now
         end
       end
+     end
 
+     #
+     # An Archipelago::Dump network backed CachedHashish.
+     #
+     class DumpHashish
+       include Archipelago::Hashish::CachedHashish
       #
-       # Actually writes +key+ serialized as +serialized_key+ an
-       # +serialized_value+ to the db. Used by <b>write_to_db</b>.
+       # Initialize a new DumpHashish with the given +officer+ to find Dumps.
       #
-       def do_write_to_db(key, serialized_key, serialized_value)
-         now = Time.now
-
-         @content_db[serialized_key] = serialized_value
-         @timestamps_db[serialized_key] = Marshal.dump(now)
-         @timestamps[key] = now
+       def initialize(officer)
+         super()
+         initialize_cached_hashish
+         @officer = officer
+         @hash_by_key = {}
       end
-
-       #
-       # Read +key+ from db and if it is found
-       # put it in the cache Hash.
+       def changed?(serialized_key, serialized_value)
+         return (old_hash = @hash_by_key[serialized_key]) && old_hash != Digest::SHA1.hexdigest(serialized_value)
+       end
+       def do_get_timestamp_from_db(serialized_key)
+         return @officer[officer_key(serialized_key, "timestamp")]
+       end
+       def do_write_to_db(key, serialized_key, serialized_value, now)
+         @officer[officer_key(serialized_key, "content")] = serialized_value
+         @officer[officer_key(serialized_key, "timestamp")] = Marshal.dump(now)
+         @hash_by_key[serialized_key] = Digest::SHA1.hexdigest(serialized_value)
+       end
+       def close!
+       end
+       def officer_key(serialized_key, space)
+         Digest::SHA1.hexdigest("Archipelago::Hashish::DumpHashish:#{space}:#{serialized_key}")
+       end
+       def db_include?(key)
+         return !@officer[officer_key(Marshal.dump(key), "content")].nil?
+       end
+       def do_get_from_db(serialized_key)
+         serialized_value = @officer[officer_key(serialized_key, "content")]
+         @hash_by_key[serialized_key] = Digest::SHA1.hexdigest(serialized_value) if serialized_value
+         return serialized_value
+       end
+       def do_delete_from_persistence(serialized_key)
+         @officer.delete(officer_key(serialized_key, "content"))
+         @officer.delete(officer_key(serialized_key, "timestamp"))
+         @hash_by_key.delete(serialized_key)
+       end
+       def each(callable)
+         raise "You have to implement me!"
+       end
+     end
+
+     #
+     # A CachedHashish backed by a few BDB::Hashes.
+     #
+     class BerkeleyHashish
+       include Archipelago::Hashish::CachedHashish
       #
-       # Will call <b>value.load_hook</b> and send it
-       # a block that does the actuall insertion of the value
-       # into the live hash if <b>value.respond_to?(:load_hook)</b>.
+       # Initialize an instance with the +name+ and BDB::Env +env+.
       #
-       def get_from_db(key)
-         serialized_key = Marshal.dump(key)
-         serialized_value = @content_db[serialized_key]
-         return nil unless serialized_value
-
-         value = Marshal.load(serialized_value)
-         if value.respond_to?(:load_hook)
-           value.load_hook do
-             @content[key] = value
-           end
-         else
-           @content[key] = value
+       def initialize(name, env)
+         super()
+         initialize_cached_hashish
+         @content_db = env.open_db(BDB::HASH, name, "content", BDB::CREATE)
+         @timestamps_db = env.open_db(BDB::HASH, name, "timestamps", BDB::CREATE | BDB::NOMMAP)
+       end
+       def close!
+         @content_db.close
+         @timestamps_db.close
+       end
+       def each(callable)
+         @content_db.each do |serialized_key, serialized_value|
+           key = Marshal.load(serialized_key)
+           callable.call(key, self.[](key))
         end
-         return value
+       end
+       def do_get_timestamp_from_db(serialized_key)
+         return @timestamps_db[serialized_key]
+       end
+       def do_delete_from_persistence(serialized_key)
+         @timestamps_db[serialized_key] = nil
+         @content_db[serialized_key] = nil
+       end
+       def changed?(serialized_key, serialized_value)
+         old_serialized_value = @content_db[serialized_key]
+         return old_serialized_value && old_serialized_value != serialized_value
+       end
+       def do_write_to_db(key, serialized_key, serialized_value, now)
+         @content_db[serialized_key] = serialized_value
+         @timestamps_db[serialized_key] = Marshal.dump(now)
+       end
+       def db_include?(key)
+         return !@content_db[Marshal.dump(key)].nil?
+       end
+       def do_get_from_db(serialized_key)
+         return @content_db[serialized_key]
       end
     end
 
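
Note the two change-detection strategies above: BerkeleyHashish compares the full serialized value against its local database, while DumpHashish compares SHA1 digests cached in @hash_by_key, sparing a round trip to the network. Each entry occupies two derived slots in the Dump network, as this hedged illustration shows (my_officer is hypothetical):

    dh = Archipelago::Hashish::DumpHashish.new(my_officer)
    sk = Marshal.dump("some key")
    dh.officer_key(sk, "content")    # SHA1 hex digest naming the value slot
    dh.officer_key(sk, "timestamp")  # SHA1 hex digest naming the timestamp slot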
@@ -256,30 +394,48 @@ module Archipelago
       #
       def initialize(env_path)
         env_path.mkpath
-         @env = BDB::Env.open(env_path, BDB::CREATE | BDB::INIT_MPOOL)
+         @env = BDB::Env.open(env_path, BDB::CREATE | BDB::INIT_MPOOL | BDB::INIT_TRANSACTION)
         @berkeley_hashishes = []
         @bdb_dbs = []
       end
       #
-       # Returns a cleverly cached (but slightly inefficient)
-       # hash-like instance (see Archipelago::Hashish::BerkeleyHashish)
-       # using +name+.
+       # Get a CachedHashish either backed by a Berkeley Database (if +:name+ is given)
+       # or by an Archipelago::Sanitation network (if +:officer+ is given).
+       #
+       def get_cached_hashish(options)
+         if options[:officer]
+           hashish = DumpHashish.new(options[:officer])
+           @berkeley_hashishes << hashish
+           return hashish
+         elsif options[:name]
+           hashish = BerkeleyHashish.new(options[:name], @env)
+           @berkeley_hashishes << hashish
+           return hashish
+         end
+       end
+       #
+       # Get a BDB::Btree allowing duplicates with +name+ and +flags+.
       #
-       def get_cached_hashish(name)
-         hashish = BerkeleyHashish.new(name, @env)
-         @berkeley_hashishes << hashish
-         return hashish
+       def get_dup_tree(name, flags = BDB::CREATE | BDB::NOMMAP)
+         db = BDB::Btree.open(Pathname.new(File.join(@env.home, name)).expand_path,
+                              nil,
+                              flags,
+                              0,
+                              "env" => @env,
+                              "set_flags" => BDB::DUP)
+         @bdb_dbs << db
+         return db
       end
       #
-       # Returns a normal hash-like instance using +name+.
+       # Get a BDB::Hash with +name+ and +flags+.
       #
-       def get_hashish(name)
-         db = @env.open_db(BDB::HASH, name, nil, BDB::CREATE | BDB::NOMMAP)
+       def get_hashish(name, flags = BDB::CREATE | BDB::NOMMAP)
+         db = @env.open_db(BDB::HASH, name, nil, flags)
         @bdb_dbs << db
         return db
       end
       #
-       # Closes databases opened by this instance.
+       # Close all our databases.
       #
       def close!
         @berkeley_hashishes.each do |h|
@@ -290,11 +446,11 @@ module Archipelago
         end
       end
       #
-       # Closes databases opened by this instance and removes the persistent files.
+       # Close and remove all our databases.
       #
       def unlink!
         close!
-         home = Pathname.new(@env.home)
+         home = Pathname.new(@env.home).expand_path
         @env.close
         home.rmtree if home.exist?
       end
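
Finally, a hedged end-to-end sketch of the reworked provider API (the enclosing class name is not visible in these hunks; PersistenceProvider and my_officer are assumptions):

    provider = Archipelago::Hashish::PersistenceProvider.new(Pathname.new("/tmp/hashish_demo"))

    bdb_backed  = provider.get_cached_hashish(:name => "stuff")       # BerkeleyHashish
    dump_backed = provider.get_cached_hashish(:officer => my_officer) # DumpHashish

    # The duplicate-allowing BTree that Archipelago::Dump::Site uses as its @db.
    tree = provider.get_dup_tree("db")

    provider.close!   # or provider.unlink! to also remove the files on disk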