fluent-plugin-mongo 0.6.7 → 0.6.8
- data/ChangeLog +14 -1
- data/README.rdoc +12 -0
- data/VERSION +1 -1
- data/fluent-plugin-mongo.gemspec +2 -2
- data/test/plugin/out_mongo.rb +132 -44
- data/test/plugin/out_mongo_tag_mapped.rb +4 -0
- data/test/test_helper.rb +48 -0
- data/test/tools/auth_repl_set_manager.rb +14 -0
- data/test/tools/repl_set_manager.rb +415 -0
- data/test/tools/rs_test_helper.rb +39 -0
- metadata +50 -19
data/ChangeLog
CHANGED
@@ -1,3 +1,16 @@
+Release 0.6.8 - 2012/10/12
+
+  * Lock fluentd gem version with 0.10.x.
+  * Lock mongo gem version with 1.6.x.
+
+
+Release 0.6.7 - 2012/03/31
+
+  * Fix invaild record handling with BSON::Binary
+    https://github.com/fluent/fluent-plugin-mongo/issues/12
+  * Change disable_collection_check strategy
+    https://github.com/fluent/fluent-plugin-mongo/commit/d840c948f45302ecd73af67c0b0022e3e905f955
+

 Release 0.6.6 - 2012/03/01

@@ -7,7 +20,7 @@ Release 0.6.6 - 2012/03/01

 Release 0.6.5 - 2012/02/27

-  *
+  * Fix "mongo_replset unexpectedly requires 'host' in configuration"
     https://github.com/fluent/fluent-plugin-mongo/issues/9


data/README.rdoc
CHANGED
@@ -209,6 +209,18 @@ You can tail mongo capped collection.

   $ mongo-tail -f

+= Test
+
+Run following command:
+
+  $ bundle exec rake test
+
+You can use 'mongod' environment variable for specified mongod:
+
+  $ mongod=/path/to/mongod bundle exec rake test
+
+Note that source code in test/tools are from mongo-ruby-driver.
+
 = TODO

 == More configuration
data/VERSION
CHANGED
@@ -1 +1 @@
-0.6.7
+0.6.8
data/fluent-plugin-mongo.gemspec
CHANGED
@@ -16,8 +16,8 @@ Gem::Specification.new do |gem|
   gem.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
   gem.require_paths = ['lib']

-  gem.add_dependency "fluentd", "
-  gem.add_dependency "mongo", "
+  gem.add_dependency "fluentd", "~> 0.10.26"
+  gem.add_dependency "mongo", "~> 1.6.4"
   gem.add_development_dependency "rake", ">= 0.9.2"
   gem.add_development_dependency "simplecov", ">= 0.5.4"
   gem.add_development_dependency "rr", ">= 1.0.0"
data/test/plugin/out_mongo.rb
CHANGED
@@ -1,39 +1,39 @@
-
+# -*- coding: utf-8 -*-
+require 'tools/rs_test_helper'

 class MongoOutputTest < Test::Unit::TestCase
+  include MongoTestHelper
+
   def setup
     Fluent::Test.setup
     require 'fluent/plugin/out_mongo'
-  end

-
-
-    database fluent
-    collection test
-  ]
-
-  def create_driver(conf = CONFIG)
-    Fluent::Test::BufferedOutputTestDriver.new(Fluent::MongoOutput) {
-      def start
-        super
-      end
+    setup_mongod
+  end

-
-
-
+  def teardown
+    @db.collection(collection_name).drop
+    teardown_mongod
+  end

-
-
-
+  def collection_name
+    'test'
+  end

-
-
-
+  def default_config
+    %[
+      type mongo
+      database #{MONGO_DB_DB}
+      collection #{collection_name}
+    ]
+  end

-
-
-
-
+  def create_driver(conf = default_config)
+    conf = conf + %[
+      port #{@@mongod_port}
+    ]
+    @db = Mongo::Connection.new('localhost', @@mongod_port).db(MONGO_DB_DB)
+    Fluent::Test::BufferedOutputTestDriver.new(Fluent::MongoOutput).configure(conf)
   end

   def test_configure
@@ -42,19 +42,17 @@ class MongoOutputTest < Test::Unit::TestCase
       database fluent_test
       collection test_collection

-      host fluenter
-      port 27018
-
       capped
       capped_size 100
     ])

     assert_equal('fluent_test', d.instance.database)
     assert_equal('test_collection', d.instance.collection)
-    assert_equal('
-    assert_equal(
+    assert_equal('localhost', d.instance.host)
+    assert_equal(@@mongod_port, d.instance.port)
     assert_equal({:capped => true, :size => 100}, d.instance.collection_options)
-    assert_equal(
+    assert_equal(Fluent::BasicBuffer.config_params[:buffer_chunk_limit].last[:default], # more better access?
+                 d.instance.instance_variable_get(:@buffer).buffer_chunk_limit)
     # buffer_chunk_limit moved from configure to start
     # I will move this test to correct space after BufferedOutputTestDriver supports start method invoking
     # assert_equal(Fluent::MongoOutput::LIMIT_BEFORE_v1_8, d.instance.instance_variable_get(:@buffer).buffer_chunk_limit)
@@ -68,8 +66,9 @@
     d.emit({'a' => 2}, time)
     d.expect_format([time, {'a' => 1, d.instance.time_key => time}].to_msgpack)
     d.expect_format([time, {'a' => 2, d.instance.time_key => time}].to_msgpack)
-
     d.run
+
+    assert_equal(2, @db.collection(collection_name).count)
   end

   def emit_documents(d)
@@ -79,30 +78,119 @@
     time
   end

+  def get_documents
+    @db.collection(collection_name).find().to_a.map { |e| e.delete('_id'); e }
+  end
+
   def test_write
     d = create_driver
     t = emit_documents(d)

-
-
-
-    assert_equal(
-  end
-
-  def test_write_with_invalid_recoreds
-    skip('Implement this test using BSON directory later')
+    d.run
+    documents = get_documents.map { |e| e['a'] }.sort
+    assert_equal([1, 2], documents)
+    assert_equal(2, documents.size)
   end

   def test_write_at_enable_tag
-    d = create_driver(
+    d = create_driver(default_config + %[
       include_tag_key true
       include_time_key false
     ])
     t = emit_documents(d)

-
+    d.run
+    documents = get_documents.sort_by { |e| e['a'] }
     assert_equal([{'a' => 1, d.instance.tag_key => 'test'},
                   {'a' => 2, d.instance.tag_key => 'test'}], documents)
-    assert_equal(
+    assert_equal(2, documents.size)
+  end
+
+  def emit_invalid_documents(d)
+    time = Time.parse("2011-01-02 13:14:15 UTC").to_i
+    d.emit({'a' => 3, '$last' => '石動'}, time)
+    d.emit({'a' => 4, 'first' => '菖蒲'.encode('EUC-JP').force_encoding('UTF-8')}, time)
+    time
+  end
+
+  def test_write_with_invalid_recoreds
+    d = create_driver
+    t = emit_documents(d)
+    t = emit_invalid_documents(d)
+
+    d.run
+    documents = get_documents
+    assert_equal(4, documents.size)
+    assert_equal([1, 2], documents.select { |e| e.has_key?('a') }.map { |e| e['a'] }.sort)
+    assert_equal(2, documents.select { |e| e.has_key?(Fluent::MongoOutput::BROKEN_DATA_KEY)}.size)
+    assert_equal([3, 4], @db.collection(collection_name).find({Fluent::MongoOutput::BROKEN_DATA_KEY => {'$exists' => true}}).map { |doc|
+        Marshal.load(doc[Fluent::MongoOutput::BROKEN_DATA_KEY].to_s)['a']
+      }.sort)
+  end
+
+  def test_write_with_invalid_recoreds_at_ignore
+    d = create_driver(default_config + %[
+      ignore_invalid_record true
+    ])
+    t = emit_documents(d)
+    t = emit_invalid_documents(d)
+
+    d.run
+    documents = get_documents
+    assert_equal(2, documents.size)
+    assert_equal([1, 2], documents.select { |e| e.has_key?('a') }.map { |e| e['a'] }.sort)
+    assert_equal(true, @db.collection(collection_name).find({Fluent::MongoOutput::BROKEN_DATA_KEY => {'$exists' => true}}).count.zero?)
+  end
+end
+
+class MongoReplOutputTest < MongoOutputTest
+  def setup
+    Fluent::Test.setup
+    require 'fluent/plugin/out_mongo_replset'
+
+    ensure_rs
+  end
+
+  def teardown
+    @rs.restart_killed_nodes
+    if defined?(@db) && @db
+      @db.collection(collection_name).drop
+      @db.connection.close
+    end
+  end
+
+  def default_config
+    %[
+      type mongo_replset
+      database #{MONGO_DB_DB}
+      collection #{collection_name}
+      nodes #{build_seeds(3).join(',')}
+      num_retries 30
+    ]
+  end
+
+  def create_driver(conf = default_config)
+    @db = Mongo::ReplSetConnection.new(build_seeds(3), :name => @rs.name).db(MONGO_DB_DB)
+    Fluent::Test::BufferedOutputTestDriver.new(Fluent::MongoOutputReplset).configure(conf)
+  end
+
+  def test_configure
+    d = create_driver(%[
+      type mongo_replset
+
+      database fluent_test
+      collection test_collection
+      nodes #{build_seeds(3).join(',')}
+      num_retries 45
+
+      capped
+      capped_size 100
+    ])
+
+    assert_equal('fluent_test', d.instance.database)
+    assert_equal('test_collection', d.instance.collection)
+    assert_equal(build_seeds(3), d.instance.nodes)
+    assert_equal(45, d.instance.num_retries)
+    assert_equal({:capped => true, :size => 100}, d.instance.collection_options)
   end
 end
data/test/test_helper.rb
CHANGED
@@ -18,3 +18,51 @@ end

 require 'test/unit'
 require 'fluent/test'
+
+# for testing
+
+def unused_port
+  s = TCPServer.open(0)
+  port = s.addr[1]
+  s.close
+  port
+end
+
+# for MongoDB
+
+require 'mongo'
+
+MONGO_DB_DB = 'fluent_test'
+MONGO_DB_PATH = File.join(File.dirname(__FILE__), 'plugin', 'data')
+
+module MongoTestHelper
+  @@setup_count = 0
+
+  def cleanup_mongod_env
+    system("killall mongod")
+    system("rm -rf #{MONGO_DB_PATH}")
+    system("mkdir -p #{MONGO_DB_PATH}")
+  end
+
+  def setup_mongod
+    unless defined?(@@current_mongo_test_class) and @@current_mongo_test_class == self.class
+      cleanup_mongod_env
+
+      @@current_mongo_test_class = self.class
+      @@mongod_port = unused_port
+      @@pid = spawn(ENV['mongod'], "--port=#{@@mongod_port}", "--dbpath=#{MONGO_DB_PATH}")
+      sleep 3
+    end
+
+    @@setup_count += 1;
+  end
+
+  def teardown_mongod
+    if defined?(@@current_mongo_test_class)
+      Mongo::Connection.new('localhost', @@mongod_port).drop_database(MONGO_DB_DB)
+    end
+    if @@setup_count == self.class.methods.size
+      cleanup_mongod_env
+    end
+  end
+end
data/test/tools/auth_repl_set_manager.rb
ADDED
@@ -0,0 +1,14 @@
+require File.join((File.expand_path(File.dirname(__FILE__))), 'repl_set_manager')
+
+class AuthReplSetManager < ReplSetManager
+  def initialize(opts={})
+    super(opts)
+
+    @key_path = opts[:key_path] || File.join(File.expand_path(File.dirname(__FILE__)), "keyfile.txt")
+    system("chmod 600 #{@key_path}")
+  end
+
+  def start_cmd(n)
+    super + " --keyFile #{@key_path}"
+  end
+end
data/test/tools/repl_set_manager.rb
ADDED
@@ -0,0 +1,415 @@
+require 'thread'
+
+STDOUT.sync = true
+
+require 'mongo'
+#unless defined? Mongo
+#  require File.join(File.dirname(__FILE__), '..', '..', 'lib', 'mongo')
+#end
+
+class ReplSetManager
+
+  attr_accessor :host, :start_port, :ports, :name, :mongods, :tags, :version
+
+  def initialize(opts={})
+    @mongod = ENV['mongod'] || 'mongod'
+    @start_port = opts[:start_port] || 30000
+    @ports      = []
+    @name       = opts[:name] || 'replica-set-foo'
+    @host       = opts[:host] || 'localhost'
+    @retries    = opts[:retries] || 30
+    @config     = {"_id" => @name, "members" => []}
+    @durable    = opts.fetch(:durable, false)
+    @smallfiles = opts.fetch(:smallfiles, true)
+    @prealloc   = opts.fetch(:prealloc, false)
+    @path       = File.join(File.expand_path(File.dirname(__FILE__)), "data")
+    @oplog_size = opts.fetch(:oplog_size, 16)
+    @tags = [{"dc" => "ny", "rack" => "a", "db" => "main"},
+             {"dc" => "ny", "rack" => "b", "db" => "main"},
+             {"dc" => "sf", "rack" => "a", "db" => "main"}]
+
+    @arbiter_count   = opts[:arbiter_count]   || 0
+    @secondary_count = opts[:secondary_count] || 2
+    @passive_count   = opts[:passive_count]   || 0
+    @primary_count   = 1
+
+    @count = @primary_count + @passive_count + @arbiter_count + @secondary_count
+    if @count > 7
+      raise StandardError, "Cannot create a replica set with #{node_count} nodes. 7 is the max."
+    end
+
+    @mongods = {}
+    version_string = `#{@mongod} --version`
+    version_string =~ /(\d\.\d\.\d)/
+    @version = $1.split(".").map {|d| d.to_i }
+  end
+
+  def start_set
+    system("killall mongod")
+    sleep(1)
+    should_start = true
+    puts "** Starting a replica set with #{@count} nodes"
+
+    n = 0
+    (@primary_count + @secondary_count).times do
+      init_node(n, should_start) do |attrs|
+        if @version[0] >= 2
+          attrs['tags'] = @tags[n % @tags.size]
+        end
+      end
+      n += 1
+    end
+
+    @passive_count.times do
+      init_node(n, should_start) do |attrs|
+        attrs['priority'] = 0
+      end
+      n += 1
+    end
+
+    @arbiter_count.times do
+      init_node(n, should_start) do |attrs|
+        attrs['arbiterOnly'] = true
+      end
+      n += 1
+    end
+
+    initiate
+    ensure_up
+  end
+
+  def cleanup_set
+    system("killall mongod")
+    @count.times do |n|
+      system("rm -rf #{@mongods[n]['db_path']}")
+    end
+  end
+
+  def init_node(n, should_start=true)
+    @mongods[n] ||= {}
+    port = @start_port + n
+    @ports << port
+    @mongods[n]['port'] = port
+    @mongods[n]['db_path'] = get_path("rs-#{port}")
+    @mongods[n]['log_path'] = get_path("log-#{port}")
+    @mongods[n]['start'] = start_cmd(n)
+
+    if should_start
+      system("rm -rf #{@mongods[n]['db_path']}")
+      system("mkdir -p #{@mongods[n]['db_path']}")
+      start(n)
+    end
+
+    member = {'_id' => n, 'host' => "#{@host}:#{@mongods[n]['port']}"}
+
+    if block_given?
+      custom_attrs = {}
+      yield custom_attrs
+      member.merge!(custom_attrs)
+      @mongods[n].merge!(custom_attrs)
+    end
+
+    @config['members'] << member
+  end
+
+  def journal_switch
+    if @version[0] >= 2
+      if @durable
+        "--journal"
+      else
+        "--nojournal"
+      end
+    elsif @durable
+      "--journal"
+    end
+  end
+
+  def start_cmd(n)
+    @mongods[n]['start'] = "#{@mongod} --replSet #{@name} --logpath '#{@mongods[n]['log_path']}' " +
+      "--oplogSize #{@oplog_size} #{journal_switch} --dbpath #{@mongods[n]['db_path']} --port #{@mongods[n]['port']} --fork"
+    @mongods[n]['start'] += " --dur" if @durable
+    @mongods[n]['start'] += " --smallfiles" if @smallfiles
+    @mongods[n]['start'] += " --noprealloc" unless @prealloc
+    @mongods[n]['start']
+  end
+
+  def remove_secondary_node
+    primary = get_node_with_state(1)
+    con = get_connection(primary)
+    config = con['local']['system.replset'].find_one
+    secondary = get_node_with_state(2)
+    host_port = "#{@host}:#{@mongods[secondary]['port']}"
+    kill(secondary)
+    @mongods.delete(secondary)
+    @config['members'].reject! {|m| m['host'] == host_port}
+    @config['version'] = config['version'] + 1
+
+    begin
+      con['admin'].command({'replSetReconfig' => @config})
+    rescue Mongo::ConnectionFailure
+    end
+
+    con.close
+
+    return secondary
+  end
+
+  def add_node(n=nil, &block)
+    primary = get_node_with_state(1)
+    con = get_connection(primary)
+
+    init_node(n || @mongods.length, &block)
+    config = con['local']['system.replset'].find_one
+    @config['version'] = config['version'] + 1
+
+    # We expect a connection failure on reconfigure here.
+    begin
+      con['admin'].command({'replSetReconfig' => @config})
+    rescue Mongo::ConnectionFailure
+    end
+
+    con.close
+    ensure_up
+  end
+
+  def add_arbiter
+    add_node do |attrs|
+      attrs['arbiterOnly'] = true
+    end
+  end
+
+  def wait_for_death(pid)
+    @retries.times do
+      if `ps a | grep mongod`.include?("#{pid}")
+        puts "waiting for mongod @ pid #{pid} to die..."
+        sleep(1)
+      else
+        puts "mongod @ pid #{pid} was killed successfully"
+        return true
+      end
+    end
+    puts "mongod never died"
+    return false
+  end
+
+  def kill(node, signal=2)
+    pid = @mongods[node]['pid']
+    puts "** Killing node with pid #{pid} at port #{@mongods[node]['port']}"
+    system("kill #{pid}")
+    dead = wait_for_death(pid)
+    @mongods[node]['up'] = false if dead
+  end
+
+  def kill_primary(signal=2)
+    node = get_node_with_state(1)
+    kill(node, signal)
+    return node
+  end
+
+  # Note that we have to rescue a connection failure
+  # when we run the StepDown command because that
+  # command will close the connection.
+  def step_down_primary
+    primary = get_node_with_state(1)
+    con = get_connection(primary)
+    begin
+      con['admin'].command({'replSetStepDown' => 90})
+    rescue Mongo::ConnectionFailure
+    end
+    con.close
+  end
+
+  def kill_secondary
+    node = get_node_with_state(2)
+    kill(node)
+    return node
+  end
+
+  def kill_all_secondaries
+    nodes = get_all_nodes_with_state(2)
+    if nodes
+      nodes.each do |n|
+        kill(n)
+      end
+    end
+  end
+
+  def restart_killed_nodes
+    nodes = @mongods.keys.select do |key|
+      @mongods[key]['up'] == false
+    end
+
+    nodes.each do |node|
+      start(node)
+    end
+
+    ensure_up
+  end
+
+  def get_node_from_port(port)
+    @mongods.keys.detect { |key| @mongods[key]['port'] == port }
+  end
+
+  def start(node)
+    system(@mongods[node]['start'])
+    @mongods[node]['up'] = true
+    sleep(0.5)
+    @mongods[node]['pid'] = File.open(File.join(@mongods[node]['db_path'], 'mongod.lock')).read.strip
+  end
+  alias :restart :start
+
+  def ensure_up(n=nil, connection=nil)
+    print "** Ensuring members are up..."
+
+    attempt(n) do
+      print "."
+      con = connection || get_connection
+      begin
+        status = con['admin'].command({:replSetGetStatus => 1})
+      rescue Mongo::OperationFailure => ex
+        con.close
+        raise ex
+      end
+      if status['members'].all? { |m| m['health'] == 1 &&
+          [1, 2, 7].include?(m['state']) } &&
+          status['members'].any? { |m| m['state'] == 1 }
+
+        connections = []
+        states = []
+        status['members'].each do |member|
+          begin
+            host, port = member['name'].split(':')
+            port = port.to_i
+            conn = Mongo::Connection.new(host, port, :slave_ok => true)
+            connections << conn
+            state = conn['admin'].command({:ismaster => 1})
+            states << state
+          rescue Mongo::ConnectionFailure
+            connections.each {|c| c.close }
+            con.close
+            raise Mongo::OperationFailure
+          end
+        end
+
+        if states.any? {|s| s['ismaster']}
+          print "all members up!\n\n"
+          connections.each {|c| c.close }
+          con.close
+          return status
+        else
+          con.close
+          raise Mongo::OperationFailure
+        end
+      else
+        con.close
+        raise Mongo::OperationFailure
+      end
+    end
+    return false
+  end
+
+  def primary
+    nodes = get_all_host_pairs_with_state(1)
+    nodes.empty? ? nil : nodes[0]
+  end
+
+  def secondaries
+    get_all_host_pairs_with_state(2)
+  end
+
+  def arbiters
+    get_all_host_pairs_with_state(7)
+  end
+
+  # String used for adding a shard via mongos
+  # using the addshard command.
+  def shard_string
+    str = "#{@name}/"
+    str << @mongods.map do |k, mongod|
+      "#{@host}:#{mongod['port']}"
+    end.join(',')
+    str
+  end
+
+  private
+
+  def initiate
+    puts "Initiating replica set..."
+    con = get_connection
+
+    attempt do
+      con.object_id
+      con['admin'].command({'replSetInitiate' => @config})
+    end
+
+    con.close
+  end
+
+  def get_all_nodes_with_state(state)
+    status = ensure_up
+    nodes = status['members'].select {|m| m['state'] == state}
+    nodes = nodes.map do |node|
+      host_port = node['name'].split(':')
+      port = host_port[1] ? host_port[1].to_i : 27017
+      @mongods.keys.detect {|key| @mongods[key]['port'] == port}
+    end
+
+    nodes == [] ? false : nodes
+  end
+
+  def get_node_with_state(state)
+    status = ensure_up
+    node = status['members'].detect {|m| m['state'] == state}
+    if node
+      host_port = node['name'].split(':')
+      port = host_port[1] ? host_port[1].to_i : 27017
+      key = @mongods.keys.detect {|n| @mongods[n]['port'] == port}
+      return key
+    else
+      return false
+    end
+  end
+
+  def get_all_host_pairs_with_state(state)
+    status = ensure_up
+    nodes = status['members'].select {|m| m['state'] == state}
+    nodes.map do |node|
+      host_port = node['name'].split(':')
+      port = host_port[1] ? host_port[1].to_i : 27017
+      [host, port]
+    end
+  end
+
+  def get_connection(node=nil)
+    con = attempt do
+      if !node
+        node = @mongods.keys.detect {|key| !@mongods[key]['arbiterOnly'] && @mongods[key]['up'] }
+      end
+      con = Mongo::Connection.new(@host, @mongods[node]['port'], :slave_ok => true)
+    end
+
+    return con
+  end
+
+  def get_path(name)
+    File.join(@path, name)
+  end
+
+  def attempt(retries=nil)
+    raise "No block given!" unless block_given?
+    count = 0
+
+    while count < (retries || @retries) do
+      begin
+        return yield
+      rescue Mongo::OperationFailure, Mongo::ConnectionFailure => ex
+        sleep(2)
+        count += 1
+      end
+    end
+
+    puts "NO MORE ATTEMPTS"
+    raise ex
+  end
+
+end
data/test/tools/rs_test_helper.rb
ADDED
@@ -0,0 +1,39 @@
+$:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+require 'test_helper'
+require 'tools/repl_set_manager'
+
+class Test::Unit::TestCase
+  # Ensure replica set is available as an instance variable and that
+  # a new set is spun up for each TestCase class
+  def ensure_rs
+    unless defined?(@@current_class) and @@current_class == self.class
+      @@current_class = self.class
+      @@rs = ReplSetManager.new
+      @@rs.start_set
+    end
+    @rs = @@rs
+  end
+
+  # Generic code for rescuing connection failures and retrying operations.
+  # This could be combined with some timeout functionality.
+  def rescue_connection_failure(max_retries=30)
+    retries = 0
+    begin
+      yield
+    rescue Mongo::ConnectionFailure => ex
+      puts "Rescue attempt #{retries}: from #{ex}"
+      retries += 1
+      raise ex if retries > max_retries
+      sleep(2)
+      retry
+    end
+  end
+
+  def build_seeds(num_hosts)
+    seeds = []
+    num_hosts.times do |n|
+      seeds << "#{@rs.host}:#{@rs.ports[n]}"
+    end
+    seeds
+  end
+end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-mongo
 version: !ruby/object:Gem::Version
-  version: 0.6.7
+  version: 0.6.8
   prerelease:
 platform: ruby
 authors:
@@ -9,33 +9,43 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2012-
+date: 2012-10-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fluentd
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
-    - -
+    - - ~>
       - !ruby/object:Gem::Version
-        version: 0.10.
+        version: 0.10.26
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 0.10.26
 - !ruby/object:Gem::Dependency
   name: mongo
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
-    - -
+    - - ~>
       - !ruby/object:Gem::Version
-        version: 1.6.
+        version: 1.6.4
   type: :runtime
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: 1.6.4
 - !ruby/object:Gem::Dependency
   name: rake
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -43,10 +53,15 @@ dependencies:
         version: 0.9.2
   type: :development
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: 0.9.2
 - !ruby/object:Gem::Dependency
   name: simplecov
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -54,10 +69,15 @@ dependencies:
         version: 0.5.4
   type: :development
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: 0.5.4
 - !ruby/object:Gem::Dependency
   name: rr
-  requirement:
+  requirement: !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -65,7 +85,12 @@ dependencies:
         version: 1.0.0
   type: :development
   prerelease: false
-  version_requirements:
+  version_requirements: !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: 1.0.0
 description: MongoDB plugin for Fluent event collector
 email: repeatedly@gmail.com
 executables:
@@ -94,6 +119,9 @@ files:
 - test/plugin/out_mongo.rb
 - test/plugin/out_mongo_tag_mapped.rb
 - test/test_helper.rb
+- test/tools/auth_repl_set_manager.rb
+- test/tools/repl_set_manager.rb
+- test/tools/rs_test_helper.rb
 homepage: https://github.com/fluent/fluent-plugin-mongo
 licenses: []
 post_install_message:
@@ -108,7 +136,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
       segments:
       - 0
-      hash:
+      hash: 4031633710892322981
 required_rubygems_version: !ruby/object:Gem::Requirement
   none: false
   requirements:
@@ -117,10 +145,10 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
       segments:
       - 0
-      hash:
+      hash: 4031633710892322981
 requirements: []
 rubyforge_project:
-rubygems_version: 1.8.
+rubygems_version: 1.8.24
 signing_key:
 specification_version: 3
 summary: MongoDB plugin for Fluent event collector
@@ -129,3 +157,6 @@ test_files:
 - test/plugin/out_mongo.rb
 - test/plugin/out_mongo_tag_mapped.rb
 - test/test_helper.rb
+- test/tools/auth_repl_set_manager.rb
+- test/tools/repl_set_manager.rb
+- test/tools/rs_test_helper.rb