mongo 1.3.0.rc0 → 1.3.0

@@ -0,0 +1,99 @@
+ require 'test/test_helper'
+ include Mongo
+
+ class WorkerPoolTest < Test::Unit::TestCase
+   context "Initialization: " do
+
+     def wait_for_async
+       sleep 0.2
+     end
+
+     setup do
+       def new_mock_queue
+         stub_everything('queue')
+       end
+
+       def new_mock_thread
+         stub_everything('thread')
+       end
+     end
+
+     context "given a size" do
+       setup do
+         @size = 5
+       end
+
+       should "allocate a Thread 'size' times" do
+         Queue.stubs(:new).returns(new_mock_queue)
+         Thread.expects(:new).times(@size).returns(new_mock_thread)
+         Async::WorkerPool.new @size
+       end
+
+       should "set 'abort_on_exception' for each current thread" do
+         Queue.stubs(:new).returns(new_mock_queue)
+         thread = new_mock_thread
+         Thread.stubs(:new).returns(thread)
+
+         thread.expects(:abort_on_exception=).with(true).times(@size)
+
+         Async::WorkerPool.new @size
+       end
+
+       should "save each thread into the workers queue" do
+         assert_equal @size, Async::WorkerPool.new(@size).workers.size
+       end
+
+     end # context 'given a size'
+
+
+     context "given a job" do
+       setup do
+         @pool = Async::WorkerPool.new 1
+         @command = stub_everything('command')
+         @cmd_args = stub_everything('command args')
+         @callback = stub_everything('callback')
+       end
+
+       should "remove nils from the command args array and pass the results to the callback" do
+         args = [nil, @cmd_args]
+         @command.expects(:call).with(@cmd_args).returns(2)
+         @callback.expects(:call).with(nil, 2)
+
+         @pool.enqueue @command, args, @callback
+         wait_for_async
+       end
+
+       should "execute the original command with args and pass the results to the callback" do
+         @cmd_args.expects(:compact).returns(@cmd_args)
+         @command.expects(:call).with(@cmd_args).returns(2)
+         @callback.expects(:call).with(nil, 2)
+
+         @pool.enqueue @command, @cmd_args, @callback
+         wait_for_async
+       end
+
+       should "capture any exceptions and pass them to the callback" do
+         args = [@cmd_args]
+         error = StandardError.new
+         @command.expects(:call).with(@cmd_args).raises(error)
+         @callback.expects(:call).with(error, nil)
+
+         @pool.enqueue @command, args, @callback
+         wait_for_async
+       end
+
+       should "abort the thread when the callback raises an exception" do
+         args = [@cmd_args]
+         error = StandardError.new
+         @callback.expects(:call).raises(error)
+
+         assert_raises(StandardError) do
+           @pool.enqueue @command, args, @callback
+           wait_for_async
+         end
+       end
+     end # context 'given a job'
+
+
+   end
+ end
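Read together, these tests pin down the pool's public surface: Async::WorkerPool.new(size) spins up size worker threads with abort_on_exception set, enqueue(command, args, callback) runs the command against the nil-compacted args on a worker, and the callback receives (error, result). A minimal usage sketch based only on what the tests exercise (the command is written to accept its arguments either splatted or as an array, since the tests leave that detail open):

    require 'mongo'   # assumes the driver load path also provides Mongo::Async::WorkerPool
    include Mongo

    pool = Async::WorkerPool.new(2)

    command  = lambda { |*a| a.flatten.inject(0) { |sum, n| sum + n } }
    callback = lambda do |error, result|
      if error
        warn "job failed: #{error.inspect}"
      else
        puts "job result: #{result}"   # => 3 for the args below
      end
    end

    pool.enqueue(command, [1, 2, nil], callback)   # the nil is compacted away before the call
    sleep 0.2                                      # give the worker thread a moment, like wait_for_async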
@@ -21,6 +21,13 @@ class ByteBufferTest < Test::Unit::TestCase
      assert_equal 1, @buf.get
    end

+   def test_unpack
+     @buf.put_array([17, 2, 3, 4])
+     assert_equal [17, 2, 3, 4], @buf.to_a
+     assert_equal ["11020304"], @buf.unpack("H*")
+     assert_equal ["11020304"], @buf.to_a("H*")
+   end
+
    def test_one_get_returns_array_length_one
      @buf.put_array([1, 2, 3, 4])
      @buf.rewind
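For reference, the bytes in the new test line up as follows: 17, 2, 3, 4 are 0x11, 0x02, 0x03, 0x04, so unpacking with "H*" yields the single hex string "11020304". A quick sketch, assuming the buffer under test is BSON::ByteBuffer from the bson library:

    require 'bson'   # assumption: the ByteBuffer being tested is BSON::ByteBuffer

    buf = BSON::ByteBuffer.new
    buf.put_array([17, 2, 3, 4])

    buf.to_a           # => [17, 2, 3, 4]
    buf.unpack("H*")   # => ["11020304"]  (0x11 0x02 0x03 0x04 as one hex string)
    buf.to_a("H*")     # => ["11020304"]  (to_a with a format string behaves like unpack here)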
@@ -0,0 +1,21 @@
+ require File.join(File.dirname(__FILE__), '..', '..', '..', 'lib', 'mongo')
+ require 'logger'
+ require 'rubygems'
+ require 'resque'
+ require 'sinatra'
+ require File.join(File.dirname(__FILE__), 'processor')
+
+ $con = Mongo::Connection.new
+ $db = $con['foo']
+
+
+ configure do
+   LOGGER = Logger.new("sinatra.log")
+   enable :logging, :dump_errors
+   set :raise_errors, true
+ end
+
+ get '/' do
+   Processor.perform(1)
+   true
+ end
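If this classic-style Sinatra app is meant to sit behind the unicorn config that appears later in this diff, it also needs a rackup file next to it; a minimal sketch (the 'load' filename in the require is an assumption, adjust it to the actual script name):

    # config.ru -- minimal rackup file for the classic-style Sinatra app above
    require File.join(File.dirname(__FILE__), 'load')   # hypothetical filename for the script above
    run Sinatra::Application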
@@ -0,0 +1,26 @@
+ require 'logger'
+
+ class Processor
+   @queue = :processor
+
+   def self.connection
+     @log ||= Logger.new(STDOUT)
+     @con ||= Mongo::Connection.new("localhost", 27017)
+   end
+
+   def self.perform(n)
+     begin
+       100.times do |n|
+         self.connection['resque']['docs'].insert({:n => n, :data => "0" * 1000}, :safe => true)
+       end
+
+       5.times do |n|
+         num = rand(100)
+         self.connection['resque']['docs'].find({:n => {"$gt" => num}}).limit(1).to_a
+       end
+     rescue => e
+       @log.warn(e.inspect)
+     end
+   end
+
+ end
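Processor follows Resque's job convention (a class-level @queue plus self.perform), and resque is already required by the Sinatra script above, so the same work could also be pushed through a queue instead of being called inline; a sketch:

    require 'resque'
    require File.join(File.dirname(__FILE__), 'processor')

    # Push a job onto the :processor queue; a worker started with
    #   rake resque:work QUEUE=processor
    # would pick it up and call Processor.perform(1).
    Resque.enqueue(Processor, 1)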
@@ -0,0 +1,29 @@
+ # Set the path to the app that unicorn will serve;
+ # note the trailing slash in this example.
+ @dir = "/home/kyle/work/10gen/ruby-driver/test/load/"
+
+ worker_processes 10
+ working_directory @dir
+
+ preload_app true
+
+ timeout 30
+
+ # Specify the path to the socket unicorn listens on;
+ # we will use this in our nginx.conf later.
+ listen "#{@dir}tmp/sockets/unicorn.sock", :backlog => 64
+
+ # Set the process id path.
+ pid "#{@dir}tmp/pids/unicorn.pid"
+
+ # Set the log file paths.
+ stderr_path "#{@dir}log/unicorn.stderr.log"
+ stdout_path "#{@dir}log/unicorn.stdout.log"
+
+ # NOTE: You need this when using forking web servers!
+ after_fork do |server, worker|
+   $con.close if $con
+   $con = Mongo::Connection.new
+   $db = $con['foo']
+   STDERR << "FORKED #{server} #{worker}"
+ end
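The after_fork hook is there because a Mongo::Connection wraps a socket, and a socket inherited across fork cannot be shared safely between parent and child; each child closes the inherited handle and opens its own. The same pattern outside unicorn, as a rough illustration:

    require 'mongo'

    $con = Mongo::Connection.new

    pid = Process.fork do
      $con.close                     # discard the socket inherited from the parent
      $con = Mongo::Connection.new   # open a fresh connection owned by the child
      $con['foo']['docs'].insert({:forked => true})
    end
    Process.wait(pid)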
@@ -0,0 +1,58 @@
+ require 'rubygems'
+ require 'mongo'
+ require 'sharding_manager'
+
+ class MongoLoader
+
+   def initialize
+     @mongo = Mongo::Connection.new("localhost", 50000)
+     @data = BSON::Binary.new(File.open("tools.gz").read)
+     @count = 0
+     @manager = ShardingManager.new(:config_count => 3)
+     @manager.start_cluster
+   end
+
+   def kill
+     @manager.kill_random
+   end
+
+   def restart
+     @manager.restart_killed_nodes
+   end
+
+   def run
+     Thread.new do
+       ("a".."z").each do |p|
+         seed(p)
+       end
+     end
+   end
+
+   def seed(prefix)
+     @queue = []
+     1000.times do |n|
+       id = BSON::OrderedHash.new
+       id[:p] = prefix
+       id[:c] = n
+       @queue << {:tid => id, :data => @data}
+     end
+
+     while @queue.length > 0 do
+       begin
+         doc = @queue.pop
+         @mongo['app']['photos'].insert(doc, :safe => {:w => 3})
+         @count += 1
+         p @count
+       rescue StandardError => e
+         p e
+         p @count
+         @queue.push(doc)
+         @count -= 1
+         sleep(10)
+         retry
+       end
+     end
+   end
+ end
+
+ @m = MongoLoader.new
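With the class above loaded (the script already instantiates one as @m), driving the loader interactively might look like the sketch below; it assumes the mongod and mongos binaries are on the PATH, a tools.gz payload file is in the working directory, and repl_set_manager/sharding_manager are on the load path:

    loader = MongoLoader.new   # boots the sharded cluster via ShardingManager
    loader.run                 # seeds app.photos for prefixes "a".."z" in a background thread
    loader.kill                # kills the primary of a random shard while inserts are in flight
    loader.restart             # brings the killed nodes back; the insert loop retries and catches up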
@@ -0,0 +1,202 @@
+ require 'repl_set_manager'
+ require 'thread'
+ require 'mongo'
+
+ class ShardingManager
+
+   attr_accessor :shards
+
+   def initialize(opts={})
+     @durable = opts.fetch(:durable, true)
+     @host = "localhost"
+
+     @mongos_port = opts[:mongos_port] || 50000
+     @config_port = opts[:config_port] || 40000
+     @shard_start_port = opts[:start_shard_port] || 30000
+     @path = File.join(File.expand_path(File.dirname(__FILE__)), "data")
+     system("rm -rf #{@path}")
+
+     @shard_count = 2
+     @mongos_count = 1
+     @config_count = opts.fetch(:config_count, 1)
+     if ![1, 3].include?(@config_count)
+       raise ArgumentError, "Must specify 1 or 3 config servers."
+     end
+
+     @config_servers = {}
+     @mongos_servers = {}
+     @shards = []
+     @ports = []
+   end
+
+   def kill_random
+     shard_to_kill = rand(@shard_count)
+     @shards[shard_to_kill].kill_primary
+   end
+
+   def restart_killed_nodes
+     threads = []
+     @shards.each do |shard|
+       threads << Thread.new do
+         shard.restart_killed_nodes
+       end
+     end
+     threads.each {|t| t.join}
+   end
+
+   def start_cluster
+     start_sharding_components
+     start_mongos_servers
+     configure_cluster
+   end
+
+   def configure_cluster
+     add_shards
+     enable_sharding
+     shard_collection
+   end
+
+   def enable_sharding
+     mongos['admin'].command({:enablesharding => "app"})
+   end
+
+   def shard_collection
+     cmd = BSON::OrderedHash.new
+     cmd[:shardcollection] = "app.photos"
+     cmd[:key] = {:tid => 1}
+     p mongos['admin'].command(cmd)
+   end
+
+   def add_shards
+     @shards.each do |shard|
+       cmd = {:addshard => shard.shard_string}
+       p cmd
+       p mongos['admin'].command(cmd)
+     end
+     p mongos['admin'].command({:listshards => 1})
+   end
+
+   def mongos
+     attempt do
+       @mongos ||= Mongo::Connection.new(@host, @mongos_servers[0]['port'])
+     end
+   end
+
+   private
+
+   def start_sharding_components
+     system("killall mongos")
+
+     threads = []
+     threads << Thread.new do
+       start_shards
+     end
+
+     threads << Thread.new do
+       start_config_servers
+     end
+     threads.each {|t| t.join}
+     puts "\nShards and config servers up!"
+   end
+
+   def start_shards
+     threads = []
+     @shard_count.times do |n|
+       threads << Thread.new do
+         port = @shard_start_port + n * 100
+         shard = ReplSetManager.new(:arbiter_count => 0, :secondary_count => 2,
+           :passive_count => 0, :start_port => port, :durable => @durable,
+           :name => "shard-#{n}")
+         shard.start_set
+         shard.ensure_up
+         @shards << shard
+       end
+     end
+     threads.each {|t| t.join}
+   end
+
+   def start_config_servers
+     @config_count.times do |n|
+       @config_servers[n] ||= {}
+       port = @config_port + n
+       @ports << port
+       @config_servers[n]['port'] = port
+       @config_servers[n]['db_path'] = get_path("config-#{port}")
+       @config_servers[n]['log_path'] = get_path("log-config-#{port}")
+       system("rm -rf #{@config_servers[n]['db_path']}")
+       system("mkdir -p #{@config_servers[n]['db_path']}")
+
+       @config_servers[n]['start'] = start_config_cmd(n)
+
+       start(@config_servers, n)
+     end
+   end
+
+   def start_mongos_servers
+     @mongos_count.times do |n|
+       @mongos_servers[n] ||= {}
+       port = @mongos_port + n
+       @ports << port
+       @mongos_servers[n]['port'] = port
+       @mongos_servers[n]['db_path'] = get_path("mongos-#{port}")
+       @mongos_servers[n]['pidfile_path'] = File.join(@mongos_servers[n]['db_path'], "mongod.lock")
+       @mongos_servers[n]['log_path'] = get_path("log-mongos-#{port}")
+       system("rm -rf #{@mongos_servers[n]['db_path']}")
+       system("mkdir -p #{@mongos_servers[n]['db_path']}")
+
+       @mongos_servers[n]['start'] = start_mongos_cmd(n)
+
+       start(@mongos_servers, n)
+     end
+   end
+
+   def start_config_cmd(n)
+     cmd = "mongod --configsvr --logpath '#{@config_servers[n]['log_path']}' " +
+       "--dbpath #{@config_servers[n]['db_path']} --port #{@config_servers[n]['port']} --fork"
+     cmd += " --dur" if @durable
+     cmd
+   end
+
+   def start_mongos_cmd(n)
+     "mongos --configdb #{config_db_string} --logpath '#{@mongos_servers[n]['log_path']}' " +
+       "--pidfilepath #{@mongos_servers[n]['pidfile_path']} --port #{@mongos_servers[n]['port']} --fork"
+   end
+
+   def config_db_string
+     @config_servers.map do |k, v|
+       "#{@host}:#{v['port']}"
+     end.join(',')
+   end
+
+   def start(set, node)
+     system(set[node]['start'])
+     set[node]['up'] = true
+     sleep(0.5)
+     set[node]['pid'] = File.read(File.join(set[node]['db_path'], 'mongod.lock')).strip
+   end
+   alias :restart :start
+
+   def cleanup_config
+   end
+
+   def get_path(name)
+     File.join(@path, name)
+   end
+
+   # TODO: put this into a shared module
+   def attempt
+     raise "No block given!" unless block_given?
+     count = 0
+
+     while count < 50 do
+       begin
+         return yield
+       rescue Mongo::OperationFailure, Mongo::ConnectionFailure => ex
+         sleep(1)
+         count += 1
+       end
+     end
+
+     raise ex
+   end
+ end
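Standing the cluster up on its own, roughly (again assuming mongod and mongos on the PATH and repl_set_manager from the driver's test helpers on the load path):

    require 'sharding_manager'

    manager = ShardingManager.new(:config_count => 3, :durable => false)
    manager.start_cluster                  # two replica-set shards, config server(s), one mongos on port 50000

    con = Mongo::Connection.new("localhost", 50000)
    con['app']['photos'].insert({:tid => {:p => "a", :c => 1}, :data => "hello"}, :safe => true)

    manager.kill_random                    # take down a random shard's primary
    manager.restart_killed_nodes           # and restart whatever was killed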