mongo 1.3.1 → 1.4.0
This diff shows the changes between publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only.
- data/README.md +9 -6
- data/Rakefile +3 -4
- data/docs/HISTORY.md +20 -2
- data/docs/READ_PREFERENCE.md +39 -0
- data/docs/RELEASES.md +1 -1
- data/docs/REPLICA_SETS.md +23 -2
- data/docs/TAILABLE_CURSORS.md +51 -0
- data/docs/TUTORIAL.md +4 -4
- data/docs/WRITE_CONCERN.md +5 -2
- data/lib/mongo.rb +7 -22
- data/lib/mongo/collection.rb +96 -29
- data/lib/mongo/connection.rb +107 -62
- data/lib/mongo/cursor.rb +136 -57
- data/lib/mongo/db.rb +26 -5
- data/lib/mongo/exceptions.rb +17 -1
- data/lib/mongo/gridfs/grid.rb +1 -1
- data/lib/mongo/repl_set_connection.rb +273 -156
- data/lib/mongo/util/logging.rb +42 -0
- data/lib/mongo/util/node.rb +183 -0
- data/lib/mongo/util/pool.rb +76 -13
- data/lib/mongo/util/pool_manager.rb +208 -0
- data/lib/mongo/util/ssl_socket.rb +38 -0
- data/lib/mongo/util/support.rb +9 -1
- data/lib/mongo/util/timeout.rb +42 -0
- data/lib/mongo/version.rb +3 -0
- data/mongo.gemspec +2 -2
- data/test/bson/binary_test.rb +1 -1
- data/test/bson/bson_string_test.rb +30 -0
- data/test/bson/bson_test.rb +6 -3
- data/test/bson/byte_buffer_test.rb +1 -1
- data/test/bson/hash_with_indifferent_access_test.rb +1 -1
- data/test/bson/json_test.rb +1 -1
- data/test/bson/object_id_test.rb +2 -18
- data/test/bson/ordered_hash_test.rb +38 -3
- data/test/bson/test_helper.rb +46 -0
- data/test/bson/timestamp_test.rb +32 -10
- data/test/collection_test.rb +89 -3
- data/test/connection_test.rb +35 -20
- data/test/cursor_test.rb +63 -2
- data/test/db_test.rb +12 -2
- data/test/pool_test.rb +21 -0
- data/test/replica_sets/connect_test.rb +26 -13
- data/test/replica_sets/connection_string_test.rb +1 -4
- data/test/replica_sets/count_test.rb +1 -0
- data/test/replica_sets/insert_test.rb +1 -0
- data/test/replica_sets/pooled_insert_test.rb +4 -1
- data/test/replica_sets/query_secondaries.rb +2 -1
- data/test/replica_sets/query_test.rb +2 -1
- data/test/replica_sets/read_preference_test.rb +43 -0
- data/test/replica_sets/refresh_test.rb +123 -0
- data/test/replica_sets/replication_ack_test.rb +9 -4
- data/test/replica_sets/rs_test_helper.rb +2 -2
- data/test/timeout_test.rb +14 -0
- data/test/tools/repl_set_manager.rb +134 -23
- data/test/unit/collection_test.rb +6 -8
- data/test/unit/connection_test.rb +4 -4
- data/test/unit/cursor_test.rb +23 -5
- data/test/unit/db_test.rb +2 -0
- data/test/unit/grid_test.rb +2 -0
- data/test/unit/node_test.rb +73 -0
- data/test/unit/pool_manager_test.rb +47 -0
- data/test/unit/read_test.rb +101 -0
- metadata +214 -138
- data/lib/mongo/test.rb +0 -20
- data/test/async/collection_test.rb +0 -224
- data/test/async/connection_test.rb +0 -24
- data/test/async/cursor_test.rb +0 -162
- data/test/async/worker_pool_test.rb +0 -99
- data/test/load/resque/load.rb +0 -21
- data/test/load/resque/processor.rb +0 -26
- data/test/load/unicorn/unicorn.rb +0 -29
- data/test/tools/load.rb +0 -58
- data/test/tools/sharding_manager.rb +0 -202
- data/test/tools/test.rb +0 -4
- data/test/unit/repl_set_connection_test.rb +0 -59
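A headline change in 1.4.0 is automatic background refresh of replica-set state (see `data/lib/mongo/repl_set_connection.rb`, `data/lib/mongo/util/pool_manager.rb`, and the new refresh test below). The following is a minimal usage sketch based only on the option and method names exercised in that test; the hostnames and ports are placeholders.

```ruby
# Sketch of the new replica-set refresh options, assuming the names used in
# data/test/replica_sets/refresh_test.rb. Hosts/ports are illustrative only.
require 'mongo'

conn = Mongo::ReplSetConnection.new(['rs1.example.com', 27017],
                                    ['rs2.example.com', 27017],
                                    ['rs3.example.com', 27017],
                                    :refresh_interval => 2,       # seconds between background checks
                                    :background_refresh => true)  # keep the view of the set current

# With :background_refresh => false, the set can still be re-scanned on demand:
# conn.refresh

puts conn.primary_pool.inspect
puts conn.secondaries.inspect
```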
data/test/replica_sets/refresh_test.rb (new file):

@@ -0,0 +1,123 @@
+$:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+require './test/replica_sets/rs_test_helper'
+require 'benchmark'
+
+# on ports TEST_PORT, RS.ports[1], and TEST + 2.
+class ReplicaSetRefreshTest < Test::Unit::TestCase
+  include Mongo
+
+  def setup
+    @conn = nil
+  end
+
+  def teardown
+    RS.restart_killed_nodes
+    @conn.close if @conn
+  end
+
+  def test_connect_speed
+    Benchmark.bm do |x|
+      x.report("Connect") do
+        10.times do
+          ReplSetConnection.new([RS.host, RS.ports[0]], [RS.host, RS.ports[1]],
+            [RS.host, RS.ports[2]], :background_refresh => false)
+        end
+      end
+
+      @con = ReplSetConnection.new([RS.host, RS.ports[0]], [RS.host, RS.ports[1]],
+        [RS.host, RS.ports[2]], :background_refresh => false)
+
+      x.report("manager") do
+        man = Mongo::PoolManager.new(@con, @con.seeds)
+        10.times do
+          man.connect
+        end
+      end
+    end
+  end
+
+  def test_connect_and_manual_refresh_with_secondaries_down
+    RS.kill_all_secondaries
+
+    rescue_connection_failure do
+      @conn = ReplSetConnection.new([RS.host, RS.ports[0]], [RS.host, RS.ports[1]],
+        [RS.host, RS.ports[2]], :background_refresh => false)
+    end
+
+    assert_equal [], @conn.secondaries
+    assert @conn.connected?
+    assert_equal @conn.read_pool, @conn.primary_pool
+
+    # Refresh with no change to set
+    @conn.refresh
+    assert_equal [], @conn.secondaries
+    assert @conn.connected?
+    assert_equal @conn.read_pool, @conn.primary_pool
+
+    RS.restart_killed_nodes
+    assert_equal [], @conn.secondaries
+    assert @conn.connected?
+    assert_equal @conn.read_pool, @conn.primary_pool
+
+    # Refresh with everything up
+    @conn.refresh
+    assert @conn.read_pool
+    assert @conn.secondaries.length > 0
+  end
+
+  def test_automated_refresh_with_secondaries_down
+    RS.kill_all_secondaries
+
+    rescue_connection_failure do
+      @conn = ReplSetConnection.new([RS.host, RS.ports[0]], [RS.host, RS.ports[1]],
+        [RS.host, RS.ports[2]], :refresh_interval => 2, :background_refresh => true)
+    end
+
+    assert_equal [], @conn.secondaries
+    assert @conn.connected?
+    assert_equal @conn.read_pool, @conn.primary_pool
+
+    RS.restart_killed_nodes
+
+    sleep(3)
+
+    assert @conn.read_pool != @conn.primary_pool, "Read pool and primary pool are identical."
+    assert @conn.secondaries.length > 0, "No secondaries have been added."
+  end
+
+  def test_automated_refresh_with_removed_node
+    @conn = ReplSetConnection.new([RS.host, RS.ports[0]], [RS.host, RS.ports[1]],
+      [RS.host, RS.ports[2]], :refresh_interval => 2, :background_refresh => true)
+
+    assert_equal 2, @conn.secondary_pools.length
+    assert_equal 2, @conn.secondaries.length
+
+    n = RS.remove_secondary_node
+    sleep(4)
+
+    assert_equal 1, @conn.secondaries.length
+    assert_equal 1, @conn.secondary_pools.length
+
+    RS.add_node(n)
+  end
+
+  def test_adding_and_removing_nodes
+    @conn = ReplSetConnection.new([RS.host, RS.ports[0]], [RS.host, RS.ports[1]],
+      [RS.host, RS.ports[2]], :refresh_interval => 2, :background_refresh => true)
+
+    RS.add_node
+    sleep(5)
+
+    @conn2 = ReplSetConnection.new([RS.host, RS.ports[0]], [RS.host, RS.ports[1]],
+      [RS.host, RS.ports[2]], :refresh_interval => 2, :background_refresh => true)
+
+    assert @conn2.secondaries == @conn.secondaries
+    assert_equal 3, @conn.secondary_pools.length
+    assert_equal 3, @conn.secondaries.length
+
+    RS.remove_secondary_node
+    sleep(4)
+    assert_equal 2, @conn.secondary_pools.length
+    assert_equal 2, @conn.secondaries.length
+  end
+end
data/test/replica_sets/replication_ack_test.rb:

@@ -20,6 +20,11 @@ class ReplicaSetAckTest < Test::Unit::TestCase
     @col = @db.collection("test-sets")
   end

+  def teardown
+    RS.restart_killed_nodes
+    @conn.close if @conn
+  end
+
   def test_safe_mode_with_w_failure
     assert_raise_error OperationFailure, "timeout" do
       @col.insert({:foo => 1}, :safe => {:w => 4, :wtimeout => 1, :fsync => true})
@@ -33,15 +38,15 @@ class ReplicaSetAckTest < Test::Unit::TestCase
   end

   def test_safe_mode_replication_ack
-    @col.insert({:baz => "bar"}, :safe => {:w =>
+    @col.insert({:baz => "bar"}, :safe => {:w => 3, :wtimeout => 5000})

-    assert @col.insert({:foo => "0" * 5000}, :safe => {:w =>
+    assert @col.insert({:foo => "0" * 5000}, :safe => {:w => 3, :wtimeout => 5000})
     assert_equal 2, @slave1[MONGO_TEST_DB]["test-sets"].count

-    assert @col.update({:baz => "bar"}, {:baz => "foo"}, :safe => {:w =>
+    assert @col.update({:baz => "bar"}, {:baz => "foo"}, :safe => {:w => 3, :wtimeout => 5000})
     assert @slave1[MONGO_TEST_DB]["test-sets"].find_one({:baz => "foo"})

-    assert @col.remove({}, :safe => {:w =>
+    assert @col.remove({}, :safe => {:w => 3, :wtimeout => 5000})
     assert_equal 0, @slave1[MONGO_TEST_DB]["test-sets"].count
   end

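The acknowledgement test above exercises the hash form of `:safe` with `:w` and `:wtimeout` (also covered by the updated `data/docs/WRITE_CONCERN.md`). A small usage sketch follows; the option names come straight from the test, while the connection, database, collection, and document are illustrative only.

```ruby
# Hypothetical write-concern usage mirroring the options in the test above.
require 'mongo'

conn = Mongo::Connection.new('localhost', 27017)
coll = conn['my_db']['events']

# Block until the write has replicated to 3 members, or fail after 5 seconds.
coll.insert({:event => 'signup'}, :safe => {:w => 3, :wtimeout => 5000})
```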
data/test/replica_sets/rs_test_helper.rb:

@@ -11,7 +11,7 @@ class Test::Unit::TestCase

   # Generic code for rescuing connection failures and retrying operations.
   # This could be combined with some timeout functionality.
-  def rescue_connection_failure(max_retries=
+  def rescue_connection_failure(max_retries=30)
     retries = 0
     begin
       yield
@@ -19,7 +19,7 @@ class Test::Unit::TestCase
       puts "Rescue attempt #{retries}: from #{ex}"
       retries += 1
       raise ex if retries > max_retries
-      sleep(
+      sleep(2)
       retry
     end
   end
data/test/timeout_test.rb (new file):

@@ -0,0 +1,14 @@
+require './test/test_helper'
+
+class TestTimeout < Test::Unit::TestCase
+
+  def test_timeout
+    @conn = standard_connection(:op_timeout => 2)
+    assert @conn[MONGO_TEST_DB]['test'].save({:a => 1})
+    assert @conn[MONGO_TEST_DB]['test'].find.next
+    assert_raise OperationTimeout do
+      @conn[MONGO_TEST_DB]['test'].find({'$where' => 'function() { while(true) { this.a == 1 } }'}).next
+    end
+  end
+
+end
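The new timeout test above shows the `:op_timeout` connection option and the `OperationTimeout` exception (added alongside the changes to `data/lib/mongo/exceptions.rb` and `data/lib/mongo/util/timeout.rb`). A sketch of handling it in application code, assuming the exception is namespaced under `Mongo` and with placeholder connection details and query:

```ruby
# Hypothetical handling of the :op_timeout option exercised in the test above.
require 'mongo'

conn = Mongo::Connection.new('localhost', 27017, :op_timeout => 2)

begin
  doc = conn['my_db']['test'].find({'$where' => 'function() { while(true) { this.a == 1 } }'}).next
rescue Mongo::OperationTimeout
  # The read exceeded two seconds; decide whether to retry or give up.
  doc = nil
end
```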
data/test/tools/repl_set_manager.rb:

@@ -10,17 +10,21 @@ end

 class ReplSetManager

-  attr_accessor :host, :start_port, :ports, :name, :mongods
+  attr_accessor :host, :start_port, :ports, :name, :mongods, :tags, :version

   def initialize(opts={})
     @start_port = opts[:start_port] || 30000
     @ports = []
     @name = opts[:name] || 'replica-set-foo'
     @host = opts[:host] || 'localhost'
-    @retries = opts[:retries] ||
+    @retries = opts[:retries] || 30
     @config = {"_id" => @name, "members" => []}
     @durable = opts.fetch(:durable, false)
     @path = File.join(File.expand_path(File.dirname(__FILE__)), "data")
+    @oplog_size = opts.fetch(:oplog_size, 32)
+    @tags = [{"dc" => "ny", "rack" => "a", "db" => "main"},
+             {"dc" => "ny", "rack" => "b", "db" => "main"},
+             {"dc" => "sf", "rack" => "a", "db" => "main"}]

     @arbiter_count = opts[:arbiter_count] || 2
     @secondary_count = opts[:secondary_count] || 2
@@ -33,35 +37,59 @@ class ReplSetManager
     end

     @mongods = {}
+    version_string = `mongod --version`
+    version_string =~ /(\d\.\d\.\d)/
+    @version = $1.split(".").map {|d| d.to_i }
   end

   def start_set
-
+    begin
+      con = Mongo::Connection.new(@host, @start_port)
+    rescue Mongo::ConnectionFailure
+    end

-
+    if con && ensure_up(1, con)
+      should_start = false
+      puts "** Replica set already started."
+    else
+      should_start = true
+      system("killall mongod")
+      puts "** Starting a replica set with #{@count} nodes"
+    end

     n = 0
     (@primary_count + @secondary_count).times do
-      init_node(n)
+      init_node(n, should_start) do |attrs|
+        if @version[0] >= 2
+          attrs['tags'] = @tags[n % @tags.size]
+        end
+      end
       n += 1
     end

     @passive_count.times do
-      init_node(n) do |attrs|
+      init_node(n, should_start) do |attrs|
        attrs['priority'] = 0
      end
      n += 1
    end

     @arbiter_count.times do
-      init_node(n) do |attrs|
+      init_node(n, should_start) do |attrs|
        attrs['arbiterOnly'] = true
      end
      n += 1
    end

-
-
+    if con && ensure_up(1, con)
+      @mongods.each do |k, v|
+        v['up'] = true
+        v['pid'] = File.open(File.join(v['db_path'], 'mongod.lock')).read.strip
+      end
+    else
+      initiate
+      ensure_up
+    end
   end

   def cleanup_set
@@ -71,18 +99,20 @@ class ReplSetManager
     end
   end

-  def init_node(n)
+  def init_node(n, should_start=true)
     @mongods[n] ||= {}
     port = @start_port + n
     @ports << port
     @mongods[n]['port'] = port
     @mongods[n]['db_path'] = get_path("rs-#{port}")
     @mongods[n]['log_path'] = get_path("log-#{port}")
-    system("rm -rf #{@mongods[n]['db_path']}")
-    system("mkdir -p #{@mongods[n]['db_path']}")
-
     @mongods[n]['start'] = start_cmd(n)
-
+
+    if should_start
+      system("rm -rf #{@mongods[n]['db_path']}")
+      system("mkdir -p #{@mongods[n]['db_path']}")
+      start(n)
+    end

     member = {'_id' => n, 'host' => "#{@host}:#{@mongods[n]['port']}"}

@@ -96,13 +126,64 @@ class ReplSetManager
     @config['members'] << member
   end

+  def journal_switch
+    if @version[0] >= 2
+      if @durable
+        "--journal"
+      else
+        "--nojournal"
+      end
+    elsif @durable
+      "--journal"
+    end
+  end
+
   def start_cmd(n)
     @mongods[n]['start'] = "mongod --replSet #{@name} --logpath '#{@mongods[n]['log_path']}' " +
-      " --dbpath #{@mongods[n]['db_path']} --port #{@mongods[n]['port']} --fork"
+      "--oplogSize #{@oplog_size} #{journal_switch} --dbpath #{@mongods[n]['db_path']} --port #{@mongods[n]['port']} --fork"
     @mongods[n]['start'] += " --dur" if @durable
     @mongods[n]['start']
   end

+  def remove_secondary_node
+    primary = get_node_with_state(1)
+    con = get_connection(primary)
+    config = con['local']['system.replset'].find_one
+    secondary = get_node_with_state(2)
+    host_port = "#{@host}:#{@mongods[secondary]['port']}"
+    kill(secondary)
+    @mongods.delete(secondary)
+    @config['members'].reject! {|m| m['host'] == host_port}
+    @config['version'] = config['version'] + 1
+
+    begin
+      con['admin'].command({'replSetReconfig' => @config})
+    rescue Mongo::ConnectionFailure
+    end
+
+    con.close
+
+    return secondary
+  end
+
+  def add_node(n=nil)
+    primary = get_node_with_state(1)
+    con = get_connection(primary)
+    init_node(n || @mongods.length)
+
+    config = con['local']['system.replset'].find_one
+    @config['version'] = config['version'] + 1
+
+    # We expect a connection failure on reconfigure here.
+    begin
+      con['admin'].command({'replSetReconfig' => @config})
+    rescue Mongo::ConnectionFailure
+    end
+
+    con.close
+    ensure_up
+  end
+
   def kill(node, signal=2)
     pid = @mongods[node]['pid']
     puts "** Killing node with pid #{pid} at port #{@mongods[node]['port']}"
@@ -127,6 +208,7 @@ class ReplSetManager
       con['admin'].command({'replSetStepDown' => 90})
     rescue Mongo::ConnectionFailure
     end
+    con.close
   end

   def kill_secondary
@@ -135,6 +217,15 @@ class ReplSetManager
     return node
   end

+  def kill_all_secondaries
+    nodes = get_all_nodes_with_state(2)
+    if nodes
+      nodes.each do |n|
+        kill(n)
+      end
+    end
+  end
+
   def restart_killed_nodes
     nodes = @mongods.keys.select do |key|
       @mongods[key]['up'] == false
@@ -159,21 +250,26 @@ class ReplSetManager
   end
   alias :restart :start

-  def ensure_up
+  def ensure_up(n=nil, connection=nil)
     print "** Ensuring members are up..."

-    attempt do
-      con = get_connection
+    attempt(n) do
+      con = connection || get_connection
       status = con['admin'].command({'replSetGetStatus' => 1})
       print "."
-      if status['members'].all? { |m| m['health'] == 1 &&
+      if status['members'].all? { |m| m['health'] == 1 &&
+           [1, 2, 7].include?(m['state']) } &&
         status['members'].any? { |m| m['state'] == 1 }
         print "all members up!\n\n"
+        con.close
         return status
       else
+        con.close
         raise Mongo::OperationFailure
       end
     end
+
+    return false
   end

   def primary
@@ -207,6 +303,20 @@ class ReplSetManager
     attempt do
       con['admin'].command({'replSetInitiate' => @config})
     end
+
+    con.close
+  end
+
+  def get_all_nodes_with_state(state)
+    status = ensure_up
+    nodes = status['members'].select {|m| m['state'] == state}
+    nodes = nodes.map do |node|
+      host_port = node['name'].split(':')
+      port = host_port[1] ? host_port[1].to_i : 27017
+      @mongods.keys.detect {|key| @mongods[key]['port'] == port}
+    end
+
+    nodes == [] ? false : nodes
   end

   def get_node_with_state(state)
@@ -215,7 +325,7 @@ class ReplSetManager
     if node
       host_port = node['name'].split(':')
       port = host_port[1] ? host_port[1].to_i : 27017
-      key = @mongods.keys.detect {|
+      key = @mongods.keys.detect {|n| @mongods[n]['port'] == port}
       return key
     else
       return false
@@ -247,19 +357,20 @@ class ReplSetManager
     File.join(@path, name)
   end

-  def attempt
+  def attempt(retries=nil)
     raise "No block given!" unless block_given?
     count = 0

-    while count < @retries do
+    while count < (retries || @retries) do
       begin
         return yield
       rescue Mongo::OperationFailure, Mongo::ConnectionFailure => ex
-        sleep(
+        sleep(2)
         count += 1
       end
     end

+    puts "NO MORE ATTEMPTS"
     raise ex
   end

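The test harness changes above make `ReplSetManager` reusable across runs (it detects an already-running set) and add node add/remove helpers used by the new refresh tests. A rough sketch of driving it from a script; the method names come from the diff, while the option values and require path layout are assumptions.

```ruby
# Hypothetical script using the updated test harness shown above.
require './test/tools/repl_set_manager'

manager = ReplSetManager.new(:start_port => 30000, :name => 'replica-set-foo')
manager.start_set                     # boots (or re-uses) the mongod processes and initiates the set

manager.kill_all_secondaries          # simulate secondary failures
manager.restart_killed_nodes          # bring the killed nodes back

node = manager.remove_secondary_node  # reconfigure the set without one secondary
manager.add_node(node)                # then add it back and wait for members to come up
```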