mongo 1.2.0 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README.md +15 -3
- data/docs/FAQ.md +4 -0
- data/docs/HISTORY.md +11 -4
- data/lib/mongo.rb +1 -1
- data/lib/mongo/collection.rb +28 -25
- data/lib/mongo/connection.rb +54 -12
- data/lib/mongo/cursor.rb +17 -11
- data/lib/mongo/db.rb +31 -11
- data/lib/mongo/repl_set_connection.rb +17 -2
- data/lib/mongo/util/pool.rb +50 -6
- data/test/auxillary/repl_set_auth_test.rb +58 -0
- data/test/auxillary/threaded_authentication_test.rb +101 -0
- data/test/bson/bson_test.rb +43 -0
- data/test/connection_test.rb +1 -1
- data/test/db_api_test.rb +0 -37
- data/test/load/thin/load.rb +24 -0
- data/test/load/unicorn/load.rb +23 -0
- data/test/load/unicorn/unicorn.rb +29 -0
- data/test/tools/auth_repl_set_manager.rb +14 -0
- data/test/tools/load.rb +58 -0
- data/test/tools/repl_set_manager.rb +34 -9
- data/test/tools/sharding_manager.rb +202 -0
- data/test/tools/test.rb +3 -12
- data/test/unit/collection_test.rb +19 -22
- data/test/unit/connection_test.rb +0 -1
- data/test/unit/db_test.rb +1 -0
- metadata +23 -11
data/test/tools/auth_repl_set_manager.rb
ADDED
@@ -0,0 +1,14 @@
+require File.join((File.expand_path(File.dirname(__FILE__))), 'repl_set_manager')
+
+class AuthReplSetManager < ReplSetManager
+  def initialize(opts={})
+    super(opts)
+
+    @key_path = opts[:key_path] || File.join(File.expand_path(File.dirname(__FILE__)), "keyfile.txt")
+    system("chmod 600 #{@key_path}")
+  end
+
+  def start_cmd(n)
+    super + " --keyFile #{@key_path}"
+  end
+end
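The new AuthReplSetManager subclasses ReplSetManager and appends --keyFile to each node's start command, so every mongod in the test replica set boots with keyfile authentication. A minimal usage sketch, assuming a keyfile already exists at the given path and that test/tools is on the load path (start_set is inherited from ReplSetManager):

    require 'auth_repl_set_manager'   # assumes test/tools is on the load path

    # Boot an auth-enabled replica set for testing.
    # initialize chmods the keyfile to 600 before any node starts.
    manager = AuthReplSetManager.new(:key_path => "/tmp/keyfile.txt")
    manager.start_set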
data/test/tools/load.rb
ADDED
@@ -0,0 +1,58 @@
+require 'rubygems'
+require 'mongo'
+require 'sharding_manager'
+
+class MongoLoader
+
+  def initialize
+    @mongo = Mongo::Connection.new("localhost", 50000)
+    @data = BSON::Binary.new(File.open("tools.gz").read)
+    @count = 0
+    @manager = ShardingManager.new(:config_count => 3)
+    @manager.start_cluster
+  end
+
+  def kill
+    @manager.kill_random
+  end
+
+  def restart
+    @manager.restart_killed_nodes
+  end
+
+  def run
+    Thread.new do
+      ("a".."z").each do |p|
+        seed(p)
+      end
+    end
+  end
+
+  def seed(prefix)
+    @queue = []
+    1000.times do |n|
+      id = BSON::OrderedHash.new
+      id[:p] = prefix
+      id[:c] = n
+      @queue << {:tid => id, :data => @data}
+    end
+
+    while @queue.length > 0 do
+      begin
+        doc = @queue.pop
+        @mongo['app']['photos'].insert(doc, :safe => {:w => 3})
+        @count += 1
+        p @count
+      rescue StandardError => e
+        p e
+        p @count
+        @queue.push(doc)
+        @count -= 1
+        sleep(10)
+        retry
+      end
+    end
+  end
+end
+
+@m = MongoLoader.new
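MongoLoader exercises the cluster end to end: it boots a sharded cluster through ShardingManager, then pushes binary documents (the contents of tools.gz) into app.photos with :safe => {:w => 3}, requeueing and retrying any insert that fails during a failover. A plausible console session, using only the methods defined above:

    @m = MongoLoader.new   # boots the sharded cluster via ShardingManager
    @m.run                 # background thread seeds prefixes "a".."z"
    @m.kill                # kill a random shard's primary; pending inserts retry every 10s
    @m.restart             # restart the killed nodes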
data/test/tools/repl_set_manager.rb
CHANGED
@@ -1,5 +1,7 @@
 #!/usr/bin/ruby
 
+require 'thread'
+
 STDOUT.sync = true
 
 unless defined? Mongo
@@ -17,6 +19,7 @@ class ReplSetManager
     @host = opts[:host] || 'localhost'
     @retries = opts[:retries] || 60
     @config = {"_id" => @name, "members" => []}
+    @durable = opts.fetch(:durable, false)
     @path = File.join(File.expand_path(File.dirname(__FILE__)), "data")
 
     @arbiter_count = opts[:arbiter_count] || 2
@@ -61,6 +64,13 @@ class ReplSetManager
     ensure_up
   end
 
+  def cleanup_set
+    system("killall mongod")
+    @count.times do |n|
+      system("rm -rf #{@mongods[n]['db_path']}")
+    end
+  end
+
   def init_node(n)
     @mongods[n] ||= {}
     port = @start_port + n
@@ -71,9 +81,7 @@ class ReplSetManager
     system("rm -rf #{@mongods[n]['db_path']}")
     system("mkdir -p #{@mongods[n]['db_path']}")
 
-    @mongods[n]['start'] =
-      " --dbpath #{@mongods[n]['db_path']} --port #{@mongods[n]['port']} --fork"
-
+    @mongods[n]['start'] = start_cmd(n)
     start(n)
 
     member = {'_id' => n, 'host' => "#{@host}:#{@mongods[n]['port']}"}
@@ -88,17 +96,24 @@ class ReplSetManager
     @config['members'] << member
   end
 
-  def
+  def start_cmd(n)
+    @mongods[n]['start'] = "mongod --replSet #{@name} --logpath '#{@mongods[n]['log_path']}' " +
+      " --dbpath #{@mongods[n]['db_path']} --port #{@mongods[n]['port']} --fork"
+    @mongods[n]['start'] += " --dur" if @durable
+    @mongods[n]['start']
+  end
+
+  def kill(node, signal=2)
     pid = @mongods[node]['pid']
     puts "** Killing node with pid #{pid} at port #{@mongods[node]['port']}"
-    system("kill
+    system("kill -#{signal} #{@mongods[node]['pid']}")
     @mongods[node]['up'] = false
     sleep(1)
   end
 
-  def kill_primary
+  def kill_primary(signal=2)
     node = get_node_with_state(1)
-    kill(node)
+    kill(node, signal)
     return node
   end
 
@@ -174,6 +189,16 @@ class ReplSetManager
     get_all_host_pairs_with_state(7)
   end
 
+  # String used for adding a shard via mongos
+  # using the addshard command.
+  def shard_string
+    str = "#{@name}/"
+    str << @mongods.map do |k, mongod|
+      "#{@host}:#{mongod['port']}"
+    end.join(',')
+    str
+  end
+
   private
 
   def initiate
@@ -229,13 +254,13 @@ class ReplSetManager
     while count < @retries do
       begin
         return yield
-      rescue Mongo::OperationFailure, Mongo::ConnectionFailure
+      rescue Mongo::OperationFailure, Mongo::ConnectionFailure => ex
        sleep(1)
        count += 1
      end
    end
 
-    raise
+    raise ex
  end
 
 end
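Two changes stand out in ReplSetManager: kill and kill_primary now accept a signal argument (defaulting to 2, i.e. SIGINT), and the mongod invocation has moved into an overridable start_cmd, which is what lets AuthReplSetManager above append --keyFile. A sketch of simulating a hard crash with the new signal parameter (restart_killed_nodes is an existing ReplSetManager method referenced elsewhere in this diff):

    require 'repl_set_manager'   # assumes test/tools is on the load path

    manager = ReplSetManager.new(:durable => true)  # the new :durable option adds --dur via start_cmd
    manager.start_set
    manager.kill_primary(9)                         # SIGKILL instead of the default SIGINT
    manager.restart_killed_nodes                    # bring the killed primary back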
data/test/tools/sharding_manager.rb
ADDED
@@ -0,0 +1,202 @@
+require 'repl_set_manager'
+require 'thread'
+
+class ShardingManager
+
+  attr_accessor :shards
+
+  def initialize(opts={})
+    @durable = opts.fetch(:durable, true)
+    @host = "localhost"
+
+    @mongos_port = opts[:mongos_port] || 50000
+    @config_port = opts[:config_port] || 40000
+    @shard_start_port = opts[:start_shard_port] || 30000
+    @path = File.join(File.expand_path(File.dirname(__FILE__)), "data")
+    system("rm -rf #{@path}")
+
+    @shard_count = 2
+    @mongos_count = 1
+    @config_count = opts.fetch(:config_count, 1)
+    if ![1, 3].include?(@config_count)
+      raise ArgumentError, "Must specify 1 or 3 config servers."
+    end
+
+    @config_servers = {}
+    @mongos_servers = {}
+    @shards = []
+    @ports = []
+  end
+
+  def kill_random
+    shard_to_kill = rand(@shard_count)
+    @shards[shard_to_kill].kill_primary
+  end
+
+  def restart_killed
+    threads = []
+    @shards.each do |k, shard|
+      threads << Thread.new do
+        shard.restart_killed_nodes
+      end
+    end
+  end
+
+  def start_cluster
+    start_sharding_components
+    start_mongos_servers
+    configure_cluster
+  end
+
+  def configure_cluster
+    add_shards
+    enable_sharding
+    shard_collection
+  end
+
+  def enable_sharding
+    mongos['admin'].command({:enablesharding => "app"})
+  end
+
+  def shard_collection
+    cmd = BSON::OrderedHash.new
+    cmd[:shardcollection] = "app.photos"
+    cmd[:key] = {:tid => 1}
+    p mongos['admin'].command(cmd)
+  end
+
+  def add_shards
+    @shards.each do |shard|
+      cmd = {:addshard => shard.shard_string}
+      p cmd
+      p mongos['admin'].command(cmd)
+    end
+    p mongos['admin'].command({:listshards => 1})
+  end
+
+  def mongos
+    attempt do
+      @mongos ||= Mongo::Connection.new(@host, @mongos_servers[0]['port'])
+    end
+  end
+
+  private
+
+  def start_sharding_components
+    system("killall mongos")
+
+    threads = []
+    threads << Thread.new do
+      start_shards
+    end
+
+    threads << Thread.new do
+      start_config_servers
+    end
+    threads.each {|t| t.join}
+    puts "\nShards and config servers up!"
+  end
+
+  def start_shards
+    threads = []
+    @shard_count.times do |n|
+      threads << Thread.new do
+        port = @shard_start_port + n * 100
+        shard = ReplSetManager.new(:arbiter_count => 0, :secondary_count => 2,
+          :passive_count => 0, :start_port => port, :durable => @durable,
+          :name => "shard-#{n}")
+        shard.start_set
+        shard.ensure_up
+        @shards << shard
+      end
+    end
+    threads.each {|t| t.join}
+  end
+
+  def start_config_servers
+    @config_count.times do |n|
+      @config_servers[n] ||= {}
+      port = @config_port + n
+      @ports << port
+      @config_servers[n]['port'] = port
+      @config_servers[n]['db_path'] = get_path("config-#{port}")
+      @config_servers[n]['log_path'] = get_path("log-config-#{port}")
+      system("rm -rf #{@config_servers[n]['db_path']}")
+      system("mkdir -p #{@config_servers[n]['db_path']}")
+
+      @config_servers[n]['start'] = start_config_cmd(n)
+
+      start(@config_servers, n)
+    end
+  end
+
+  def start_mongos_servers
+    @mongos_count.times do |n|
+      @mongos_servers[n] ||= {}
+      port = @mongos_port + n
+      @ports << port
+      @mongos_servers[n]['port'] = port
+      @mongos_servers[n]['db_path'] = get_path("mongos-#{port}")
+      @mongos_servers[n]['pidfile_path'] = File.join(@mongos_servers[n]['db_path'], "mongod.lock")
+      @mongos_servers[n]['log_path'] = get_path("log-mongos-#{port}")
+      system("rm -rf #{@mongos_servers[n]['db_path']}")
+      system("mkdir -p #{@mongos_servers[n]['db_path']}")
+
+      @mongos_servers[n]['start'] = start_mongos_cmd(n)
+
+      start(@mongos_servers, n)
+    end
+  end
+
+  def start_config_cmd(n)
+    cmd = "mongod --configsvr --logpath '#{@config_servers[n]['log_path']}' " +
+      " --dbpath #{@config_servers[n]['db_path']} --port #{@config_servers[n]['port']} --fork"
+    cmd += " --dur" if @durable
+    cmd
+  end
+
+  def start_mongos_cmd(n)
+    "mongos --configdb #{config_db_string} --logpath '#{@mongos_servers[n]['log_path']}' " +
+      "--pidfilepath #{@mongos_servers[n]['pidfile_path']} --port #{@mongos_servers[n]['port']} --fork"
+  end
+
+  def config_db_string
+    @config_servers.map do |k, v|
+      "#{@host}:#{v['port']}"
+    end.join(',')
+  end
+
+  def start(set, node)
+    system(set[node]['start'])
+    set[node]['up'] = true
+    sleep(0.5)
+    set[node]['pid'] = File.open(File.join(set[node]['db_path'], 'mongod.lock')).read.strip
+  end
+  alias :restart :start
+
+  private
+
+  def cleanup_config
+  end
+
+  def get_path(name)
+    File.join(@path, name)
+  end
+
+  # TODO: put this into a shared module
+  def attempt
+    raise "No block given!" unless block_given?
+    count = 0
+
+    while count < 50 do
+      begin
+        return yield
+      rescue Mongo::OperationFailure, Mongo::ConnectionFailure
+        sleep(1)
+        count += 1
+      end
+    end
+
+    raise exception
+  end
+end
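ShardingManager assembles the full topology: two ReplSetManager-backed shards, one or three config servers, and a mongos front end; it then registers each shard via addshard (using the shard_string helper added above), enables sharding on the app database, and shards app.photos on {:tid => 1}. A minimal sketch of bringing up a cluster and writing through mongos on the default port:

    require 'mongo'
    require 'sharding_manager'   # assumes test/tools is on the load path

    manager = ShardingManager.new(:config_count => 3)
    manager.start_cluster   # shards, config servers, mongos, then addshard/enablesharding/shardcollection

    conn = Mongo::Connection.new("localhost", 50000)  # default :mongos_port above
    conn['app']['photos'].insert({:tid => {:p => "a", :c => 0}, :data => "..."})  # routed by the :tid shard key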
data/test/tools/test.rb
CHANGED
@@ -1,13 +1,4 @@
-require '
+require 'sharding_manager'
 
-m =
-m.
-
-node = m.kill_secondary
-m.ensure_up
-
-puts "Pausing..."
-gets
-
-m.start(node)
-m.ensure_up
+m = ShardingManager.new(:config_count => 3)
+m.start_cluster
data/test/unit/collection_test.rb
CHANGED
@@ -5,6 +5,7 @@ class CollectionTest < Test::Unit::TestCase
   context "Basic operations: " do
     setup do
       @logger = mock()
+      @logger.expects(:debug)
     end
 
     should "send update message" do
@@ -14,7 +15,7 @@ class CollectionTest < Test::Unit::TestCase
       @conn.expects(:send_message).with do |op, msg, log|
        op == 2001
      end
-      @
+      @conn.stubs(:log_operation)
       @coll.update({}, {:title => 'Moby Dick'})
     end
 
@@ -25,8 +26,8 @@ class CollectionTest < Test::Unit::TestCase
       @conn.expects(:send_message).with do |op, msg, log|
        op == 2002
      end
-      @
-
+      @conn.expects(:log_operation).with do |name, payload|
+        (name == :insert) && payload[:documents][0][:title].include?('Moby')
       end
       @coll.insert({:title => 'Moby Dick'})
     end
@@ -38,8 +39,8 @@ class CollectionTest < Test::Unit::TestCase
       @conn.expects(:receive_message).with do |op, msg, log, sock|
        op == 2004
      end.returns([[], 0, 0])
-      @
-
+      @conn.expects(:log_operation).with do |name, payload|
+        (name == :find) && payload[:selector][:title].include?('Moby')
       end
       @coll.find({:title => 'Moby Dick'}).sort([['title', 1], ['author', 1]]).next_document
     end
@@ -52,8 +53,8 @@ class CollectionTest < Test::Unit::TestCase
       @conn.expects(:send_message).with do |op, msg, log|
        op == 2002
      end
-      @
-
+      @conn.expects(:log_operation).with do |name, payload|
+        (name == :insert) && payload[:documents][0][:data].inspect.include?('Binary')
       end
       @coll.insert({:data => data})
     end
@@ -65,8 +66,8 @@ class CollectionTest < Test::Unit::TestCase
       @conn.expects(:send_message_with_safe_check).with do |op, msg, db_name, log|
        op == 2001
      end
-      @
-
+      @conn.expects(:log_operation).with do |name, payload|
+        (name == :update) && payload[:document][:title].include?('Moby')
       end
       @coll.update({}, {:title => 'Moby Dick'}, :safe => true)
     end
@@ -78,43 +79,42 @@ class CollectionTest < Test::Unit::TestCase
       @conn.expects(:send_message_with_safe_check).with do |op, msg, db_name, log|
        op == 2001
      end
-      @
+      @conn.stubs(:log_operation)
       @coll.update({}, {:title => 'Moby Dick'}, :safe => true)
     end
-
+
     should "not call insert for each ensure_index call" do
       @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
       @db = @conn['testing']
       @coll = @db.collection('books')
       @coll.expects(:generate_indexes).once
-
+
       @coll.ensure_index [["x", Mongo::DESCENDING]]
       @coll.ensure_index [["x", Mongo::DESCENDING]]
-
     end
+
     should "call generate_indexes for a new direction on the same field for ensure_index" do
       @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
       @db = @conn['testing']
       @coll = @db.collection('books')
       @coll.expects(:generate_indexes).twice
-
+
       @coll.ensure_index [["x", Mongo::DESCENDING]]
       @coll.ensure_index [["x", Mongo::ASCENDING]]
-
+
     end
-
+
     should "call generate_indexes twice because the cache time is 0 seconds" do
       @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
      @db = @conn['testing']
       @db.cache_time = 0
       @coll = @db.collection('books')
       @coll.expects(:generate_indexes).twice
-
 
       @coll.ensure_index [["x", Mongo::DESCENDING]]
       @coll.ensure_index [["x", Mongo::DESCENDING]]
     end
-
+
     should "call generate_indexes for each key when calling ensure_indexes" do
       @conn = Connection.new('localhost', 27017, :logger => @logger, :connect => false)
       @db = @conn['testing']
@@ -123,11 +123,8 @@ class CollectionTest < Test::Unit::TestCase
       @coll.expects(:generate_indexes).once.with do |a, b, c|
        a == {"x"=>-1, "y"=>-1}
      end
-
+
       @coll.ensure_index [["x", Mongo::DESCENDING], ["y", Mongo::DESCENDING]]
     end
-
-
-
   end
 end
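These unit-test changes track a new log_operation hook in the driver: instead of receiving a preformatted log string, the connection is now handed a (name, payload) pair per operation (:insert with :documents, :find with :selector, :update with :document), and the mocked logger expects a :debug call in setup. A hedged sketch of the behavior the tests pin down, assuming log_operation forwards to the logger's debug level as that expectation suggests:

    require 'rubygems'
    require 'mongo'
    require 'logger'

    conn = Mongo::Connection.new('localhost', 27017, :logger => Logger.new(STDOUT))
    conn['testing']['books'].insert({:title => 'Moby Dick'})  # logged as an :insert with payload[:documents]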