adhd 0.0.1 → 0.1.0
- data/README.rdoc +62 -2
- data/VERSION +1 -1
- data/adhd.gemspec +10 -8
- data/bin/adhd +23 -8
- data/bin/adhd_cleanup +57 -0
- data/lib/adhd/adhd_rest_server.rb +229 -0
- data/lib/adhd/config.yml +1 -1
- data/lib/adhd/models/content_doc.rb +17 -0
- data/lib/adhd/models/content_shard.rb +97 -0
- data/lib/adhd/models/node_doc.rb +139 -0
- data/lib/adhd/models/shard_range.rb +202 -0
- data/lib/adhd/node_manager.rb +260 -0
- data/lib/adhd/reactor.rb +194 -12
- data/test/test_adhd.rb +0 -11
- metadata +11 -7
- data/lib/adhd.rb +0 -120
- data/lib/adhd/models.rb +0 -388
- data/lib/adhd/node.rb +0 -13
- data/models.rb +0 -19
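The monolithic lib/adhd.rb and lib/adhd/models.rb are removed in this release; the models are split across lib/adhd/models/ and the node bootstrap moves into lib/adhd/node_manager.rb. A minimal load sketch for the new layout (paths taken from the file list above; the require order is an assumption, not taken from the gem itself):

    # Hypothetical sketch: loading the 0.1.0 layout, paths from the file list above.
    require 'rubygems'
    require 'adhd/models/node_doc'
    require 'adhd/models/shard_range'
    require 'adhd/models/content_shard'
    require 'adhd/models/content_doc'
    require 'adhd/node_manager'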
data/test/test_adhd.rb CHANGED
@@ -1,12 +1,3 @@
-<<<<<<< HEAD:test/test_adhd.rb
-require 'helper'
-
-class TestAdhd < Test::Unit::TestCase
-  should "probably rename this file and start testing for real" do
-    flunk "hey buddy, you should probably rename this file and start testing for real"
-  end
-end
-=======
 require 'rubygems'
 require 'test/unit'
 require 'shoulda'
@@ -80,5 +71,3 @@ class TestAdhd < Test::Unit::TestCase
   private
 
 end
-
->>>>>>> 15388c36a078b8eee9bc17c79501985e54be519b:test/test_adhd.rb
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: adhd
 version: !ruby/object:Gem::Version
-  version: 0.0.1
+  version: 0.1.0
 platform: ruby
 authors:
 - dave.hrycyszyn@headlondon.com
@@ -9,8 +9,8 @@ autorequire:
 bindir: bin
 cert_chain: []
 
-date: 2009-12-
-default_executable:
+date: 2009-12-27 00:00:00 +00:00
+default_executable:
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thoughtbot-shoulda
@@ -66,6 +66,7 @@ description: "More to say when something works! Do not bother installing this! "
 email: dave.hrycyszyn@headlondon.com
 executables:
 - adhd
+- adhd_cleanup
 extensions: []
 
 extra_rdoc_files:
@@ -80,11 +81,15 @@ files:
 - VERSION
 - adhd.gemspec
 - bin/adhd
+- bin/adhd_cleanup
 - doc/adhd.xmi
-- lib/adhd.rb
+- lib/adhd/adhd_rest_server.rb
 - lib/adhd/config.yml
-- lib/adhd/models.rb
-- lib/adhd/node.rb
+- lib/adhd/models/content_doc.rb
+- lib/adhd/models/content_shard.rb
+- lib/adhd/models/node_doc.rb
+- lib/adhd/models/shard_range.rb
+- lib/adhd/node_manager.rb
 - lib/adhd/reactor.rb
 - lib/ext/hash_to_openstruct.rb
 - lib/public/images/img01.jpg
@@ -106,7 +111,6 @@ files:
 - lib/public/style.css
 - lib/views/index.erb
 - lib/views/layout.erb
-- models.rb
 - test/helper.rb
 - test/test_adhd.rb
 has_rdoc: true
data/lib/adhd.rb DELETED
@@ -1,120 +0,0 @@
-require 'rubygems'
-require 'sinatra'
-require 'couchrest'
-require 'erb'
-require 'ruby-debug'
-require File.dirname(__FILE__) + '/adhd/models'
-
-# Start the server for now by cd'ing into the /lib directory and running the
-# following command:
-#
-# (first node):
-# ruby adhd.rb <node_name> <couchdb_server_url>
-#
-# (second or later node)
-# ruby adhd.rb <node_name> <couchdb_server_url> <management_node_url> <management_node_db> -p <port_number>
-#
-# <node_name> is just a string, e.g. "foo".
-# <couchdb_server_url>: the url (including port) for this node's couchdb server
-# instance, e.g, http://192.168.1.104:5984
-# <management_node_url>: the url of the management node where this node should
-# initially replicate from, e.g. http://192.168.1.104:5984
-# <management_node_db>: the couchdb management node database, e.g. "bar_node_db"
-# <port_number>: a port number to run on. If you're running more than one node locally
-# for development purposes you'll need to pick a non-default port higher than 1024.
-
-node_name = ARGV[1]
-node_url = ARGV[2]
-buddy_server_url = ARGV[3]
-buddy_db = ARGV[4]
-
-NODESERVER = CouchRest.new("#{node_url}")
-NODESERVER.default_database = "#{node_name}_node_db"
-node_db = CouchRest::Database.new(NODESERVER, "#{node_name}_node_db")
-
-# sync the db with our buddy
-if buddy_server_url && buddy_db
-  buddy_server = CouchRest.new("#{buddy_server_url}")
-  buddy_db = CouchRest::Database.new(buddy_server, buddy_db + "_node_db")
-  node_db.replicate_from(buddy_db)
-end
-
-# Retrieve our own node by our name
-# If there are other nodes with the name kill their records!
-node_candidates = Node.by_name(:key => node_name)
-node = node_candidates.pop
-node = Node.new if node.nil?
-node_candidates.each do |other_me|
-  other_me.destroy # destroy other records
-end
-
-# Update our very own record
-node.name = node_name
-node.url = node_url
-node.status = "RUNNING"
-node.save
-
-# We check if we are the first node. If we are the first node, we set ourself up
-# as the management node.
-all_nodes = Node.by_name()
-if all_nodes.length == 1
-  # puts "Setup #{node.name} as management node"
-  node.is_management = 3
-  node.save
-end
-
-# Lets build a nice NodeDB
-ndb = NodeDB.new(node)
-
-# Lets build a nice ShardDB
-srdb = ShardRangeDB.new(ndb)
-
-# If there are no shards make a few, if we are managers
-#puts "Create new ranges?"
-#puts "How many shards: #{ShardRange.by_range_start.length}"
-#puts "in #{ShardRange::SHARDSERVER.default_database}"
-if ShardRange.by_range_start.length == 0 && node.is_management
-  puts "Creating new ranges"
-  srdb.build_shards(100)
-end
-
-# Polulate the shards with some nodes at random
-node_names = []
-all_nodes.each do |anode|
-  node_names << anode.name
-end
-
-ShardRange.by_range_start.each do |s|
-  if !s.node_list or s.node_list.length == 0
-    node_names.shuffle!
-    s.node_list = node_names[0..2]
-    s.master_node = node_names[0]
-    s.save
-  end
-
-end
-# Sync all the node databases
-
-ndb.sync # SYNC
-srdb.sync # SYNC
-
-srdb.get_content_shards.each do |content_shard_db|
-  content_shard_db.sync
-end
-
-get "/" do
-  @all_nodes = Node.by_name
-  erb :index
-end
-
-get "/sync" do
-  # Sync the node database
-  ndb.sync
-  # Sync the shard database
-  srdb.sync
-
-  srdb.get_content_shards.each do |content_shard_db|
-    content_shard_db.sync
-  end
-end
-
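Removing this file also removes the Sinatra front end, including the "/" index page and the "/sync" trigger shown above. Under 0.0.1 a node's sync could be forced over HTTP roughly as follows (a sketch only, assuming Sinatra's default port 4567 when no -p flag was given):

    # Sketch: poking the 0.0.1 Sinatra endpoints that this release removes.
    require 'net/http'

    host = 'localhost'   # wherever `ruby adhd.rb <node_name> <couchdb_server_url>` was running
    port = 4567          # Sinatra's default unless -p was passed
    puts Net::HTTP.get(host, '/', port)   # rendered lib/views/index.erb listing all nodes
    Net::HTTP.get(host, '/sync', port)    # ran ndb.sync, srdb.sync and the content shard syncs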
data/lib/adhd/models.rb DELETED
@@ -1,388 +0,0 @@
-# Key Restrictions ok internal_IDs: must only contain [a-z0-9-]
-
-class Array
-  def shuffle!
-    size.downto(1) { |n| push delete_at(rand(n)) }
-    self
-  end
-end
-
-class NodeDB
-
-  attr_accessor :local_node_db, :our_node
-
-  def initialize(our_nodev)
-    @our_node = our_nodev
-
-    # Get the address of the CDB from the node
-    @local_node_db = our_nodev.get_node_db
-  end
-
-  def sync
-    # We replicate our state to the management node(s)
-    management_nodes = Node.by_is_management.reverse
-    # NOTE: randomize the order for load balancing here
-
-    # NOTE2: How to build skynet (TODO)
-    # -------------------
-    # If length of management is zero, then chose 3 different random
-    # nodes at each sync, and sync with them in node_name order.
-    # This guarantees that any updates on nodes are communicated in
-    # O(log N) ephocs, at the cost of O(3 * N) connections per epoch.
-    # It also guarantees any new management servers are discovered in
-    # this O(log N) time, creating "jelly fish" or "partition proof"
-    # availability.
-
-    management_nodes.each do |mng_node|
-      remote_db = mng_node.get_node_db
-      if !(mng_node.name == our_node.name)
-        begin
-          puts "Sync NodeDB with #{mng_node.name}"
-          local_node_db.replicate_from(remote_db)
-          # TODO: Manage conflicts here
-          local_node_db.replicate_to(remote_db)
-          break if !our_node.is_management # Only need to contact one node
-        rescue
-          puts "Could not connect to DB node #{mng_node.name}"
-          # TODO: change status or chose another management server
-          mng_node.status = "UNAVAILABLE"
-          mng_node.save
-        end
-      end
-    end
-  end
-
-end
-
-class Node < CouchRest::ExtendedDocument
-  NODESERVER = CouchRest.new("#{ARGV[1]}")
-  NODESERVER.default_database = "#{ARGV[0]}_node_db"
-
-  use_database NODESERVER.default_database
-
-  property :name
-  property :url
-  property :is_store
-  property :is_management
-  property :is_directory
-  property :status
-
-  timestamps!
-
-  view_by :name
-  view_by :is_management
-
-  def get_node_db
-    server = CouchRest.new("#{url}")
-    server.database!("#{name}_node_db")
-  end
-
-  def get_shard_db
-    server = CouchRest.new("#{url}")
-    server.database!("#{name}_shard_db")
-  end
-
-  def get_content_db(shard_db_name)
-    server = CouchRest.new("#{url}")
-    server.database!("#{name}_#{shard_db_name}_content_db")
-  end
-end
-
-class ShardRangeDB
-
-  attr_accessor :nodes, :local_shard_db, :our_node
-
-  def initialize(nodesv)
-    @nodes = nodesv
-
-    # Automatically get our shard_db address from our own node name
-    @our_node = nodesv.our_node
-    @local_shard_db = nodesv.our_node.get_shard_db
-  end
-
-  def sync
-    # We replicate our state from the management node(s)
-    # We never push content if we are only storage
-    management_nodes = Node.by_is_management.reverse
-    # NOTE: randomize the order for load balancing here
-
-
-    management_nodes.each do |mng_node|
-      remote_db = mng_node.get_shard_db
-      if !(mng_node.name == our_node.name)
-        begin
-          puts "Sync ShardRange DB pull from #{mng_node.name}"
-          local_shard_db.replicate_from(remote_db)
-          # TODO: Manage conflicts here
-          if our_node.is_management
-            # Push any changes to other management nodes
-            puts "Sync ShardRange DB pushto #{mng_node.name}"
-            local_shard_db.replicate_to(remote_db)
-          else
-            break # sync with only one management server
-          end
-        rescue
-          puts "Could not connect to DB node #{mng_node.name}"
-          # TODO: change status or chose another management server
-          mng_node.status = "UNAVAILABLE"
-          mng_node.save
-        end
-      end
-    end
-  end
-
-  def build_shards(number)
-    # Make a large list of possible id boundaries
-    characters = []
-    ("0".."9").each do |c|
-      characters << c
-    end
-    ("a".."z").each do |c|
-      characters << c
-    end
-
-    # Generate 36 x 36 keys to choose boundaries from
-    all_keys = []
-    characters.each do |c1|
-      characters.each do |c2|
-        all_keys << (c1+c2)
-      end
-    end
-
-    # Now chose our boundaries
-    num_range_keys = all_keys.length
-    approx_shard_size = (num_range_keys * 1.0) / number
-
-    shard_starts = []
-    (0...number).each do |n|
-      shard_starts << (all_keys[(n * approx_shard_size).floor])
-    end
-
-    shard_ends = shard_starts.clone
-    shard_ends << ("z" * 100)
-    shard_ends.delete_at(0)
-
-    # Finally build them!
-    puts "Build Shards"
-    (0...number).each do |n|
-      puts "Shard #{n}: from #{shard_starts[n]} to #{shard_ends[n]}"
-      shard_name = "sh_#{shard_starts[n]}_to_#{shard_ends[n]}"
-      sr = ShardRange.new
-      sr.range_start = shard_starts[n]
-      sr.range_end = shard_ends[n]
-      sr.shard_db_name = shard_name
-      sr.save
-    end
-  end
-
-  def get_shard(internal_id)
-    # Finds the list of shards within which this ID lives
-    all_shards = ShardRange.by_range_start
-    selected_shards = []
-    all_shards.each do |a_shard| # TODO: linear search is inefficient -- create a view
-      if a_shard.range_start <= internal_id && a_shard.range_end > internal_id
-        selected_shards << a_shard
-      end
-    end
-    selected_shards
-  end
-
-  def get_content_shards
-    # Return the content_shards of our node
-    content_shards = []
-    ShardRange.by_node(:key => "node1").each do |s|
-
-      # Build a content shard object
-      cs = ContentShard.new
-      cs.our_node = our_node
-      cs.this_shard = s
-      cs.nodes = nodes
-      cs.this_shard_db = our_node.get_content_db(s.shard_db_name)
-
-      # add it to the list
-      content_shards << cs
-    end
-    content_shards
-  end
-
-  def write_doc_directly(content_doc)
-    # Write a document directly to a nodes content repository
-    doc_shard = get_shard(content_doc.internal_id).first
-    doc_shard.get_nodes.each do |node|
-      # Try to write the doc to this node
-      begin
-        remote_ndb = NodeDB.new(node)
-        remote_content_shard = ContentShard.new(remote_ndb, doc_shard)
-        remote_content_shard.this_shard_db.save_doc(content_doc)
-        break
-      rescue
-        puts "Could not put doc in node #{node.name}"
-        # TODO: change status or chose another management server
-        node.status = "UNAVAILABLE"
-        node.save
-
-      end
-    end
-
-  end
-
-  def get_doc_directly(internal_id)
-    # Write a document directly to a nodes content repository
-    doc_shard = get_shard(internal_id).first
-
-    # TODO: Randomize the order of nodes for load balancing in retrieval!
-    docx = []
-    doc_shard.get_nodes.each do |node|
-      # Try to write the doc to this node
-      begin
-        remote_ndb = NodeDB.new(node)
-        remote_content_shard = ContentShard.new(remote_ndb, doc_shard)
-
-        docx = ContentDoc.by_internal_id(:key => internal_id, :database => remote_content_shard.this_shard_db)
-        if docx.length > 0
-          break
-        end
-      rescue
-        puts "Could not put doc in node #{node.name}"
-        # TODO: change status or chose another management server
-        node.status = "UNAVAILABLE"
-        node.save
-      end
-    end
-    docx
-  end
-
-end
-
-class ShardRange < CouchRest::ExtendedDocument
-  SHARDSERVER = CouchRest.new("#{ARGV[1]}")
-  SHARDSERVER.default_database = "#{ARGV[0]}_shard_db"
-
-  use_database SHARDSERVER.default_database
-
-  property :range_start
-  property :range_end
-  property :node_list
-  property :master_node
-  property :shard_db_name
-
-  view_by :range_start
-
-  # View "node" - given a node returns the shards watched
-  # How to use this new
-  #
-  # puts "Which Shards does 'node1' watch?"
-  # ShardRange.by_node(:key => "node1").each do |s|
-  #   puts "Shard: #{s.shard_db_name}"
-  # end
-
-
-  view_by :node,
-    :map =>
-      "function(doc) {
-        if (doc['couchrest-type'] == 'ShardRange' && doc.node_list) {
-          doc.node_list.forEach(function(node){
-            emit(node, 1);
-          });
-        }
-      }"
-
-  def get_nodes
-    # Return all nodes, with the master being first
-    all_nodes = node_list
-    all_nodes.delete(master_node)
-    all_nodes = [master_node] + all_nodes
-    allnodes
-  end
-
-end
-
-
-class ContentShard
-  attr_accessor :nodes, :this_shard, :our_node, :this_shard_db
-
-  def initialize(nodesv, this_shardv)
-    @nodes = nodesv
-    @this_shard = this_shardv
-
-    # Work out the rest
-    @our_node = nodesv.our_node
-    @this_shard_db = nodesv.our_node.get_content_db(this_shardv.shard_db_name)
-  end
-
-  def in_shard?(internal_id)
-    internal_id >= this_shard.range_start && internal_id < this_shard.range_end
-  end
-
-  def write_doc(content_doc)
-    # Write a content document to this shard
-    # Make sure it is in this shard
-    if in_shard? content_doc.internal_id
-      this_shard_db.save_doc(content_doc)
-    end
-  end
-
-  def sync
-    # A Shard only pushes with the master of the shard
-    # or the node with the highest is_storage value alive
-    # Shard masters ensure changes are pushed to all
-
-    # NOTE: This method needs serious refactoring
-
-    # Are we the shard master?
-    am_master = false
-    if our_node.name == this_shard.master_node
-      am_master = true
-    end
-
-    if !am_master
-      begin
-        master_node = Nodes.by_name(this_shard.master_node).first
-        remotedb = MASTER_node.get_content_db(this_shard.shard_db_name)
-        this_shard_db.replicate_to(remote_db)
-        return # We sync-ed so job is done
-      rescue
-        # We flag the master as unavailable
-        if remote_node
-          master_node.status = "UNAVAILABLE"
-          master_node.save
-        end
-      end
-    end
-
-    # Either we are the master or the master has failed -- we replicate with
-    # all nodes or the first available aside us and master
-    this_shard.node_list.each do |node_name|
-      if !(our_node.name == node_name) && !(this_shard.master_node == node_name)
-        begin
-          # Push all changes to the other nodes
-          remote_node = Nodes.by_name(node_name).first
-          remotedb = remote_node.get_content_db(this_shard.shard_db_name)
-          this_shard_db.replicate_to(remote_db)
-          break if !am_master
-        rescue
-          # Make sure that the node exist in the DB and flag it as unresponsive
-          if remote_node
-            remote_node.status = "UNAVAILABLE"
-            remote_node.save
-          end
-        end
-      end
-
-    end
-  end
-end
-
-class ContentDoc < CouchRest::ExtendedDocument
-  # NOTE: NO DEFAULT DATABASE IN THE OBJECT -- WE WILL BE STORING A LOT OF
-  # DATABASES OF THIS TYPE.
-
-  property :internal_id
-  property :size_bytes
-  property :filenane
-  property :mime_type
-
-  view_by :internal_id
-
-  # A special attachment "File" is expected to exist
-end
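As a worked example of the boundary arithmetic in the deleted build_shards above (a standalone sketch, not code from the gem): with 100 shards over 36 x 36 = 1296 two-character keys, each shard spans roughly 12.96 keys, so the first boundaries land on "00", "0c" and "0p", and the final shard is capped by the "z" * 100 sentinel.

    # Standalone sketch reproducing the boundary maths of the deleted build_shards.
    characters = ('0'..'9').to_a + ('a'..'z').to_a            # 36 symbols
    all_keys   = characters.product(characters).map(&:join)   # 1296 two-character keys
    number     = 100
    approx     = all_keys.length.to_f / number                # ~12.96 keys per shard
    starts     = (0...number).map { |n| all_keys[(n * approx).floor] }
    ends       = starts[1..-1] + ['z' * 100]                  # each shard ends where the next begins
    p starts.first(3)   # => ["00", "0c", "0p"]
    p ends.last.length  # => 100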