mink 0.1
- data/LICENSE.md +13 -0
- data/README.md +2 -0
- data/bin/mink +160 -0
- data/lib/mink.rb +15 -0
- data/lib/mink/helpers/manager_helper.rb +46 -0
- data/lib/mink/managers/auth_repl_set_manager.rb +16 -0
- data/lib/mink/managers/repl_set_manager.rb +266 -0
- data/lib/mink/managers/sharding_manager.rb +198 -0
- data/mink.gemspec +27 -0
- data/templates/replicas.yml +14 -0
- data/templates/shards.yml +32 -0
- metadata +91 -0
data/LICENSE.md
ADDED
Copyright 2011 Kyle Banker

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
data/README.md
ADDED
data/bin/mink
ADDED
#!/usr/bin/ruby

$:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
require "rubygems"
require "fileutils"
require "yaml"
require "mink"

module Mink
  module Bin
    extend self

    VALID_CONFIGS = ["replicas", "shards"]
    TEMPLATE_BASE = File.join(File.dirname(__FILE__), '..', 'templates')
    CONFIG_NAME   = "mink.yml"

    def init
      load_config
      check_mongod

      if @config[:shards]
        init_shards
      else
        init_replicas
      end
    end

    def init_shards
      m = Mink::ShardingManager.new(shard_config)
      m.start_cluster
    end

    def init_replicas
      m = Mink::ReplSetManager.new(replica_set_config)
      m.start_set
    end

    def shard_config
      config = @config[:shards]

      { :shard_count => config[:shard_count],
        :mongos_count => config[:mongos_count],
        :mongos_start_port => config[:mongos_start_port],
        :config_server_start_port => config[:config_server_start_port],
        :config_server_count => config[:config_server_count],
        :shard_database => config[:shard_database],
        :shard_collection => config[:shard_collection],
        :shard_key => config[:shard_key],
        :mongod_path => @mongod_path,
        :mongos_path => @config[:mongos_path],
        :working_dir => Dir.pwd,
        :durable => @config[:durability],
        :replica_set_config => replica_set_config
      }
    end

    def replica_set_config
      config = @config[:replica_set_config]

      { :replica_count => config[:replica_count],
        :arbiter_count => config[:arbiter_count],
        :passive_count => config[:passive_count],
        :name => config[:name],
        :start_port => config[:start_port],
        :mongod_path => @mongod_path,
        :working_dir => Dir.pwd,
        :durable => @config[:durability]
      }
    end

    def clean
      system("killall mongod")
      system("killall mongos")
      FileUtils.rm_rf(config_path)
      system("rm -rf #{File.join(Dir.pwd, "*.data")}")
      system("rm -rf #{File.join(Dir.pwd, "*.log")}")
    end

    def write_config(name=nil)
      if VALID_CONFIGS.include?(name)
        config_source = File.join(TEMPLATE_BASE, name + ".yml")
        FileUtils.cp(config_source, config_path)
        STDOUT << <<END_OF_NEXT_STEPS

You've just created a mink config file at #{config_path}. Feel free to edit it.
When you're ready to start the MongoDB cluster, enter

  mink init

END_OF_NEXT_STEPS
      else
        usage
      end
    end

    def usage
      STDERR << <<END_OF_USAGE
usage: mink COMMAND

Mink sets up MongoDB clusters on localhost. Good for testing and experimenting,
but definitely NOT FOR PRODUCTION USE.

  make     Creates a default config file for the specified cluster type. You have two choices:

             replicas   Default config for a stand-alone replica set
             shards     Default config for a shard cluster with replica sets as shards

  init     Starts the MongoDB cluster whose configuration is specified in
           the mink.yml config file from the current directory.

  clean    Kills every running mongod and mongos, and deletes mink.yml, all config files, and all data files.
           Note: use this with caution! You may want to do this manually.

example:

  mink make replicas
  mink init

END_OF_USAGE
    end

    def load_config
      config_filename = File.join(Dir.pwd, CONFIG_NAME)
      if File.exists?(config_filename)
        @config = YAML.load(File.open(config_filename, "r").read)
      else
        STDERR << "\nERROR: Can't find mink.yml in current folder!\nYou may need to run mink make\n\n"
        usage
        exit
      end
    end

    def check_mongod
      # Cache the path so that shard_config and replica_set_config can pass it along.
      @mongod_path = @config.fetch(:mongod_path, "")
      if @mongod_path.strip.length == 0 || !system(@mongod_path + " --version")
        raise ArgumentError, "mongod not found! Please check your mongod path in #{CONFIG_NAME}"
      end
    end

    def config_path
      File.join(Dir.pwd, CONFIG_NAME)
    end

  end
end

if ARGV.empty? || ARGV[0] == "-h"
  Mink::Bin.usage
  exit
else
  case ARGV[0]
  when "make" then
    Mink::Bin.write_config(ARGV[1])
  when "init" then
    Mink::Bin.init
  when "clean" then
    Mink::Bin.clean
  else
    Mink::Bin.usage
    exit
  end
end
data/lib/mink.rb
ADDED
STDOUT.sync = true

$:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))

require 'mongo'
require 'thread'

require 'mink/helpers/manager_helper'
require 'mink/managers/repl_set_manager'
require 'mink/managers/auth_repl_set_manager'
require 'mink/managers/sharding_manager'

module Mink
  VERSION = "0.1"
end
data/lib/mink/helpers/manager_helper.rb
ADDED

module Mink
  module ManagerHelper

    def get_path(name)
      File.join(@working_dir, name)
    end

    def attempt
      raise "No block given!" unless block_given?
      count = 0
      begin
        return yield
      rescue Mongo::OperationFailure, Mongo::ConnectionFailure => ex
        sleep(1)
        count += 1
        retry if count < 60
      end

      raise ex
    end

    def kill_existing_mongods
      if File.exists?(@pidlistfile)
        pids = YAML.load(File.open(@pidlistfile, "r").read)
        kill_pidlist(pids)
      else
        system("killall mongod")
      end
    end

    def kill_existing_mongos
      if File.exists?(@pidlistfile)
        pids = YAML.load(File.open(@pidlistfile, "r").read)
        kill_pidlist(pids)
      else
        system("killall mongos")
      end
    end

    def kill_pidlist(pids)
      pids.each do |pid|
        system("kill -9 #{pid}")
      end
    end
  end
end
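The attempt helper is the retry primitive the managers lean on while mongods come up: it retries the block once per second for up to a minute before re-raising. A minimal sketch of using it on its own (the PingWaiter class below is hypothetical, not part of the gem):

require 'rubygems'
require 'mongo'
require 'mink'

class PingWaiter
  include Mink::ManagerHelper

  # Blocks until the server at the given port answers a ping,
  # retrying once per second for up to a minute.
  def wait_for_ping(host, port)
    attempt do
      Mongo::Connection.new(host, port)['admin'].command({'ping' => 1})
    end
  end
end

PingWaiter.new.wait_for_ping("localhost", 30000)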
data/lib/mink/managers/auth_repl_set_manager.rb
ADDED

require File.join((File.expand_path(File.dirname(__FILE__))), 'repl_set_manager')

module Mink
  class AuthReplSetManager < ReplSetManager
    def initialize(opts={})
      super(opts)

      @key_path = opts[:key_path] || File.join(File.expand_path(File.dirname(__FILE__)), "keyfile.txt")
      system("chmod 600 #{@key_path}")
    end

    def start_cmd(n)
      super + " --keyFile #{@key_path}"
    end
  end
end
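A sketch of starting an authenticated set, assuming a shared key file has been written first (the directory and key content below are made up for illustration):

require 'rubygems'
require 'fileutils'
require 'mink'

workdir  = "/tmp/mink-auth-demo"           # hypothetical scratch directory
key_path = File.join(workdir, "keyfile.txt")

FileUtils.mkdir_p(workdir)
File.open(key_path, "w") { |f| f << "sekritclusterkey" }  # any keyfile-legal string

m = Mink::AuthReplSetManager.new(
  :working_dir   => workdir,
  :key_path      => key_path,
  :name          => "auth-rs",
  :start_port    => 31000,
  :replica_count => 2,
  :arbiter_count => 1,
  :passive_count => 0)
m.start_set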
data/lib/mink/managers/repl_set_manager.rb
ADDED

module Mink
  class ReplSetManager
    include ManagerHelper

    attr_accessor :host, :start_port, :ports, :name, :mongods, :pids

    def initialize(opts={})
      @durable = opts.fetch(:durable, false)
      @start_port = opts.fetch(:start_port, 30000)
      @name = opts.fetch(:name, 'replica-set-foo')
      @host = opts.fetch(:host, 'localhost')
      @working_dir = opts.fetch(:working_dir, nil)
      @mongod_path = opts.fetch(:mongod_path, "mongod")
      @write_conf = opts.fetch(:write_conf, false)
      @write_pids = opts.fetch(:write_pids, false)
      @replica_count = opts[:replica_count] || 2
      @arbiter_count = opts[:arbiter_count] || 2
      @passive_count = opts[:passive_count] || 1
      check_member_count

      if !@working_dir
        raise ArgumentError, "A working directory must be specified"
      end

      @data_path = opts.fetch(:path, File.join(@working_dir, "data"))
      @pidlistfile = File.join(@working_dir, "mink.pidlist")

      @ports = []
      @mongods = []
      @pids = []
      @config = {"_id" => @name, "members" => []}
    end

    def start_set
      puts "** Starting a replica set with #{@count} nodes"
      kill_existing_mongods

      n = 0
      @replica_count.times do
        configure_node(n)
        n += 1
      end

      @passive_count.times do
        configure_node(n) do |attrs|
          attrs['priority'] = 0
        end
        n += 1
      end

      @arbiter_count.times do
        configure_node(n) do |attrs|
          attrs['arbiterOnly'] = true
        end
        n += 1
      end

      write_conf if @write_conf
      startup_mongods
      initiate_repl_set
      ensure_up
    end

    def cleanup_set
      system("killall mongod")
      @mongods.each do |mongod|
        system("rm -rf #{mongod['db_path']}")
      end
    end

    def configure_node(n)
      @mongods[n] ||= {}
      port = @start_port + n
      @ports << port
      @mongods[n]['port'] = port
      @mongods[n]['db_path'] = get_path("#{port}.data")
      @mongods[n]['log_path'] = get_path("#{port}.log")

      @mongods[n]['start'] = start_cmd(n)

      member = {'_id' => n, 'host' => "#{@host}:#{@mongods[n]['port']}"}

      if block_given?
        custom_attrs = {}
        yield custom_attrs
        member.merge!(custom_attrs)
        @mongods[n].merge!(custom_attrs)
      end

      @config['members'] << member
    end

    def start_cmd(n)
      @mongods[n]['start'] = "#{@mongod_path} --replSet #{@name} --logpath '#{@mongods[n]['log_path']}' " +
        " --dbpath #{@mongods[n]['db_path']} --port #{@mongods[n]['port']} --fork"
      @mongods[n]['start'] += " --dur" if @durable
      @mongods[n]['start']
    end

    def kill(node, signal=2)
      pid = @mongods[node]['pid']
      puts "** Killing node with pid #{pid} at port #{@mongods[node]['port']}"
      system("kill -#{signal} #{pid}")
      @mongods[node]['up'] = false
      sleep(1)
    end

    def kill_primary(signal=2)
      node = get_node_with_state(1)
      kill(node, signal)
      return node
    end

    def step_down_primary
      primary = get_node_with_state(1)
      con = get_connection(primary)
      begin
        con['admin'].command({'replSetStepDown' => 90})
      rescue Mongo::ConnectionFailure
        # Stepping down closes the connection; this is expected.
      end
    end

    def kill_secondary
      node = get_node_with_state(2)
      kill(node)
      return node
    end

    def restart_killed_nodes
      @mongods.each_index do |n|
        start(n) if @mongods[n]['up'] == false
      end

      ensure_up
    end

    def get_node_from_port(port)
      @mongods.detect { |mongod| mongod['port'] == port }
    end

    def start(node)
      system(@mongods[node]['start'])
      @mongods[node]['up'] = true
      sleep(0.5)
      @mongods[node]['pid'] = File.open(File.join(@mongods[node]['db_path'], 'mongod.lock')).read.strip
    end
    alias :restart :start

    def ensure_up
      print "[RS #{@name}] Ensuring members are up...\n"

      attempt do
        con = get_connection
        status = con['admin'].command({'replSetGetStatus' => 1})
        print "."
        if status['members'].all? { |m| m['health'] == 1 && [1, 2, 7].include?(m['state']) } &&
           status['members'].any? { |m| m['state'] == 1 }
          print "all members up!\n\n"
          return status
        else
          raise Mongo::OperationFailure
        end
      end
    end

    def primary
      nodes = get_all_host_pairs_with_state(1)
      nodes.empty? ? nil : nodes[0]
    end

    def secondaries
      get_all_host_pairs_with_state(2)
    end

    def arbiters
      get_all_host_pairs_with_state(7)
    end

    # String used for adding a shard via mongos
    # using the addshard command.
    def shard_string
      str = "#{@name}/"
      str << @mongods.select do |mongod|
        !mongod['arbiterOnly'] && mongod['priority'] != 0
      end.map do |mongod|
        "#{@host}:#{mongod['port']}"
      end.join(',')
      str
    end

    def get_manual_conf
      # Not yet implemented.
    end

    def write_conf(filename=nil)
      # Not yet implemented.
    end

    private

    def startup_mongods
      @mongods.each do |mongod|
        system("rm -rf #{mongod['db_path']}")
        system("mkdir -p #{mongod['db_path']}")
        system(mongod['start'])
        mongod['up'] = true
        sleep(0.5)
        pid = File.open(File.join(mongod['db_path'], 'mongod.lock'), "r").read.strip
        mongod['pid'] = pid
        @pids << pid
      end
    end

    def initiate_repl_set
      con = get_connection

      attempt do
        con['admin'].command({'replSetInitiate' => @config})
      end
    end

    def get_node_with_state(state)
      status = ensure_up
      node = status['members'].detect {|m| m['state'] == state}
      if node
        host_port = node['name'].split(':')
        port = host_port[1] ? host_port[1].to_i : 27017
        return @mongods.index {|mongod| mongod['port'] == port}
      else
        return false
      end
    end

    def get_all_host_pairs_with_state(state)
      status = ensure_up
      nodes = status['members'].select {|m| m['state'] == state}
      nodes.map do |node|
        host_port = node['name'].split(':')
        port = host_port[1] ? host_port[1].to_i : 27017
        [@host, port]
      end
    end

    def get_connection(node=nil)
      attempt do
        node ||= @mongods.index {|mongod| !mongod['arbiterOnly'] && mongod['up'] }
        Mongo::Connection.new(@host, @mongods[node]['port'], :slave_ok => true)
      end
    end

    def check_member_count
      @count = @replica_count + @arbiter_count + @passive_count

      if @count > 7
        raise StandardError, "Cannot create a replica set with #{@count} nodes. 7 is the max."
      end
    end
  end
end
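bin/mink drives this class from mink.yml, but it can also be used directly. A minimal sketch, assuming mongod is on your PATH and /tmp/mink-rs-demo is a disposable scratch directory:

require 'rubygems'
require 'mink'

m = Mink::ReplSetManager.new(
  :working_dir   => "/tmp/mink-rs-demo",
  :name          => "demo-rs",
  :start_port    => 30000,
  :replica_count => 2,
  :arbiter_count => 1,
  :passive_count => 0)

m.start_set                # boot the mongods and initiate the set
p m.primary                # e.g. ["localhost", 30000]

m.kill_primary             # SIGINT the current primary; a secondary takes over
m.restart_killed_nodes     # restart it and block until the set is healthy again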
data/lib/mink/managers/sharding_manager.rb
ADDED

module Mink
  class ShardingManager
    include ManagerHelper

    attr_accessor :shards

    def initialize(opts={})
      @durable = opts.fetch(:durable, true)

      @mongos_port = opts.fetch(:mongos_start_port, 50000)
      @config_port = opts.fetch(:config_server_start_port, 40000)
      @working_dir = opts.fetch(:working_dir, nil)
      @mongod_path = opts.fetch(:mongod_path, "mongod")
      @mongos_path = opts.fetch(:mongos_path, "mongos")
      @write_conf = opts.fetch(:write_conf, false)
      @host = opts.fetch(:host, "localhost")

      @shard_count = opts.fetch(:shard_count, 2)
      @mongos_count = opts.fetch(:mongos_count, 1)
      @config_server_count = opts.fetch(:config_server_count, 1)
      @replica_set_config = opts.fetch(:replica_set_config, {})

      @shard_db = opts.fetch(:shard_database, "app")
      @shard_coll = opts.fetch(:shard_collection, "images")
      @shard_key = opts.fetch(:shard_key, {:tid => 1})

      if ![1, 3].include?(@config_server_count)
        raise ArgumentError, "Must specify 1 or 3 config servers."
      end

      @pidlistfile = File.join(@working_dir, "mink.pidlist")
      @data_path = opts.fetch(:path, File.join(@working_dir, "data"))

      @config_servers = {}
      @mongos_servers = {}
      @shards = []
      @ports = []
      @pids = []
    end

    def start_cluster
      kill_existing_mongods
      kill_existing_mongos
      start_sharding_components
      start_mongos_servers
      configure_cluster
    end

    def configure_cluster
      add_shards
      enable_sharding
      if shard_collection
        STDOUT << "Shard cluster initiated!\nEnter the following to connect:\n  mongo localhost:#{@mongos_port}"
      end
    end

    def enable_sharding
      mongos['admin'].command({:enablesharding => @shard_db})
    end

    def shard_collection
      cmd = BSON::OrderedHash.new
      cmd[:shardcollection] = "#{@shard_db}.#{@shard_coll}"
      cmd[:key] = @shard_key
      mongos['admin'].command(cmd)
    end

    def add_shards
      @shards.each do |shard|
        cmd = {:addshard => shard.shard_string}
        p mongos['admin'].command(cmd)
      end
      p mongos['admin'].command({:listshards => 1})
    end

    def mongos
      attempt do
        @mongos ||= Mongo::Connection.new(@host, @mongos_servers[0]['port'])
      end
    end

    def kill_random
      shard_to_kill = rand(@shard_count)
      @shards[shard_to_kill].kill_primary
    end

    def restart_killed
      threads = []
      @shards.each do |shard|
        threads << Thread.new do
          shard.restart_killed_nodes
        end
      end
      threads.each {|t| t.join}
    end

    private

    def start_sharding_components
      system("killall mongos")

      threads = []
      threads << Thread.new do
        start_shards
      end

      threads << Thread.new do
        start_config_servers
      end

      threads.each {|t| t.join}

      puts "\nShards and config servers up!"
    end

    def start_shards
      threads = []

      @shard_count.times do |n|
        threads << Thread.new do
          port = @replica_set_config[:start_port] + n * 100
          name = "shard-#{n}-#{@replica_set_config[:name]}"
          shard = ReplSetManager.new(@replica_set_config.merge(:start_port => port, :name => name))
          shard.start_set
          @shards << shard
        end
      end

      threads.each {|t| t.join}

      @shards.each do |shard|
        @pids.concat(shard.pids)
      end
    end

    def start_config_servers
      @config_server_count.times do |n|
        @config_servers[n] ||= {}
        port = @config_port + n
        @ports << port
        @config_servers[n]['port'] = port
        @config_servers[n]['db_path'] = get_path("config-#{port}.data")
        @config_servers[n]['log_path'] = get_path("config-#{port}.log")
        system("rm -rf #{@config_servers[n]['db_path']}")
        system("mkdir -p #{@config_servers[n]['db_path']}")

        @config_servers[n]['start'] = start_config_cmd(n)

        start(@config_servers, n)
      end
    end

    def start_mongos_servers
      @mongos_count.times do |n|
        @mongos_servers[n] ||= {}
        port = @mongos_port + n
        @ports << port
        @mongos_servers[n]['port'] = port
        @mongos_servers[n]['db_path'] = get_path("mongos-#{port}.data")
        @mongos_servers[n]['pidfile_path'] = File.join(@mongos_servers[n]['db_path'], "mongod.lock")
        @mongos_servers[n]['log_path'] = get_path("mongos-#{port}.log")
        system("rm -rf #{@mongos_servers[n]['db_path']}")
        system("mkdir -p #{@mongos_servers[n]['db_path']}")

        @mongos_servers[n]['start'] = start_mongos_cmd(n)

        start(@mongos_servers, n)
      end
    end

    def start_config_cmd(n)
      cmd = "#{@mongod_path} --configsvr --logpath '#{@config_servers[n]['log_path']}' " +
        " --dbpath #{@config_servers[n]['db_path']} --port #{@config_servers[n]['port']} --fork"
      cmd += " --dur" if @durable
      cmd
    end

    def start_mongos_cmd(n)
      "#{@mongos_path} --configdb #{config_db_string} --logpath '#{@mongos_servers[n]['log_path']}' " +
        "--pidfilepath #{@mongos_servers[n]['pidfile_path']} --port #{@mongos_servers[n]['port']} --fork"
    end

    def config_db_string
      @config_servers.map do |k, v|
        "#{@host}:#{v['port']}"
      end.join(',')
    end

    def start(set, node)
      system(set[node]['start'])
      set[node]['up'] = true
      sleep(0.5)
      set[node]['pid'] = File.open(File.join(set[node]['db_path'], 'mongod.lock')).read.strip
    end
    alias :restart :start

  end
end
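Used directly, the manager takes the same options bin/mink assembles in shard_config. A sketch under the same assumptions as above (mongod and mongos on PATH, disposable scratch directory):

require 'rubygems'
require 'mink'

workdir = "/tmp/mink-shard-demo"   # hypothetical scratch directory

m = Mink::ShardingManager.new(
  :working_dir               => workdir,
  :shard_count               => 2,
  :mongos_count              => 1,
  :mongos_start_port         => 50000,
  :config_server_count       => 1,
  :config_server_start_port  => 40000,
  :shard_database            => "app",
  :shard_collection          => "photos",
  :shard_key                 => {:photo_ts_id => 1},
  :replica_set_config        => {
    :replica_count => 2, :arbiter_count => 1, :passive_count => 0,
    :name => "demo-rs", :start_port => 30000, :working_dir => workdir
  })
m.start_cluster   # boots two replica-set shards, a config server, and a mongos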
data/mink.gemspec
ADDED
require "./lib/mink"

Gem::Specification.new do |s|
  s.name = 'mink'

  s.version = Mink::VERSION

  s.platform = Gem::Platform::RUBY
  s.summary = 'MongoDB configurations on localhost made easy.'
  s.description = 'Set up MongoDB shard clusters and replica sets on localhost with ease.'

  s.require_paths = ['lib']

  s.files = ['README.md', 'mink.gemspec', 'LICENSE.md', 'lib/mink.rb']
  s.files += Dir['lib/mink/**/*.rb']
  s.files += Dir['templates/**/*.yml']
  s.files += ['bin/mink']

  s.executables = ['mink']

  s.has_rdoc = false

  s.authors = ['Kyle Banker']
  s.email = 'kyle@10gen.com'

  s.add_dependency('mongo', ['>= 1.2.0'])
end
data/templates/replicas.yml
ADDED

---
# Note: this assumes that mongod is in your path by default
:mongod_path: mongod

# Whether to enable single-server durability (1.7.5+)
:durability: false

# The replica set to create.
:replica_set_config:
  :replica_count: 2
  :arbiter_count: 1
  :passive_count: 0
  :name: "test-rs"
  :start_port: 30000
data/templates/shards.yml
ADDED

---
# Note: this assumes that mongod and mongos are in your default path
:mongod_path: mongod
:mongos_path: mongos

# Whether to enable single-server durability (1.7.5+)
:durability: false

# Shard configuration
:shards:
  :shard_count: 2

  # Mongos servers
  :mongos_start_port: 50000
  :mongos_count: 1

  # Config servers
  :config_server_start_port: 40000
  :config_server_count: 1

  # Pick the collection to shard, along with the shard key
  :shard_database: "app"
  :shard_collection: "photos"
  :shard_key: { photo_ts_id: 1 }

# Each shard will consist of one replica set.
:replica_set_config:
  :replica_count: 2
  :arbiter_count: 1
  :passive_count: 0
  :name: "test-rs"
  :start_port: 30000
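Both templates use leading-colon keys, which Ruby's YAML loader returns as Symbols, so bin/mink can index the parsed hash directly. A quick sanity check, assuming it is run from the gem root:

require 'yaml'

config = YAML.load(File.read("templates/shards.yml"))
config[:shards][:shard_count]       # => 2
config[:replica_set_config][:name]  # => "test-rs"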
metadata
ADDED
--- !ruby/object:Gem::Specification
name: mink
version: !ruby/object:Gem::Version
  hash: 9
  prerelease:
  segments:
  - 0
  - 1
  version: "0.1"
platform: ruby
authors:
- Kyle Banker
autorequire:
bindir: bin
cert_chain: []

date: 2011-02-07 00:00:00 -05:00
default_executable:
dependencies:
- !ruby/object:Gem::Dependency
  name: mongo
  prerelease: false
  requirement: &id001 !ruby/object:Gem::Requirement
    none: false
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        hash: 31
        segments:
        - 1
        - 2
        - 0
        version: 1.2.0
  type: :runtime
  version_requirements: *id001
description: Set up MongoDB shard clusters and replica sets on localhost with ease.
email: kyle@10gen.com
executables:
- mink
extensions: []

extra_rdoc_files: []

files:
- README.md
- mink.gemspec
- LICENSE.md
- lib/mink.rb
- lib/mink/helpers/manager_helper.rb
- lib/mink/managers/repl_set_manager.rb
- lib/mink/managers/auth_repl_set_manager.rb
- lib/mink/managers/sharding_manager.rb
- templates/shards.yml
- templates/replicas.yml
- bin/mink
has_rdoc: false
homepage:
licenses: []

post_install_message:
rdoc_options: []

require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      hash: 3
      segments:
      - 0
      version: "0"
required_rubygems_version: !ruby/object:Gem::Requirement
  none: false
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      hash: 3
      segments:
      - 0
      version: "0"
requirements: []

rubyforge_project:
rubygems_version: 1.4.1
signing_key:
specification_version: 3
summary: MongoDB configurations on localhost made easy.
test_files: []