fluentd 0.12.0.pre.1 → 0.12.0.pre.2
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of fluentd might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/.gitignore +1 -1
- data/.travis.yml +1 -0
- data/ChangeLog +21 -0
- data/README.md +10 -2
- data/Rakefile +4 -13
- data/example/v1_literal_example.conf +36 -0
- data/fluentd.gemspec +4 -1
- data/lib/fluent/buffer.rb +73 -46
- data/lib/fluent/command/fluentd.rb +7 -2
- data/lib/fluent/config/basic_parser.rb +5 -0
- data/lib/fluent/config/element.rb +2 -5
- data/lib/fluent/config/literal_parser.rb +26 -7
- data/lib/fluent/config/section.rb +2 -0
- data/lib/fluent/config/v1_parser.rb +9 -2
- data/lib/fluent/formatter.rb +2 -1
- data/lib/fluent/mixin.rb +22 -7
- data/lib/fluent/output.rb +17 -8
- data/lib/fluent/parser.rb +14 -3
- data/lib/fluent/plugin/buf_file.rb +30 -15
- data/lib/fluent/plugin/filter_grep.rb +69 -0
- data/lib/fluent/plugin/filter_record_transformer.rb +183 -0
- data/lib/fluent/plugin/in_exec.rb +6 -0
- data/lib/fluent/plugin/in_forward.rb +34 -4
- data/lib/fluent/plugin/in_http.rb +1 -1
- data/lib/fluent/plugin/out_exec.rb +1 -1
- data/lib/fluent/plugin/out_exec_filter.rb +8 -1
- data/lib/fluent/plugin/out_forward.rb +82 -4
- data/lib/fluent/supervisor.rb +1 -1
- data/lib/fluent/timezone.rb +131 -0
- data/lib/fluent/version.rb +1 -1
- data/test/config/assertions.rb +42 -0
- data/test/config/test_config_parser.rb +385 -0
- data/test/config/test_configurable.rb +530 -0
- data/test/config/test_configure_proxy.rb +99 -0
- data/test/config/test_dsl.rb +237 -0
- data/test/config/test_literal_parser.rb +293 -0
- data/test/config/test_section.rb +112 -0
- data/test/config/test_system_config.rb +49 -0
- data/test/helper.rb +25 -0
- data/test/plugin/test_buf_file.rb +604 -0
- data/test/plugin/test_buf_memory.rb +204 -0
- data/test/plugin/test_filter_grep.rb +124 -0
- data/test/plugin/test_filter_record_transformer.rb +251 -0
- data/test/plugin/test_in_exec.rb +1 -0
- data/test/plugin/test_in_forward.rb +205 -2
- data/test/plugin/test_in_gc_stat.rb +1 -0
- data/test/plugin/test_in_http.rb +58 -2
- data/test/plugin/test_in_object_space.rb +1 -0
- data/test/plugin/test_in_status.rb +1 -0
- data/test/plugin/test_in_stream.rb +1 -1
- data/test/plugin/test_in_syslog.rb +1 -1
- data/test/plugin/test_in_tail.rb +1 -0
- data/test/plugin/test_in_tcp.rb +1 -1
- data/test/plugin/test_in_udp.rb +1 -1
- data/test/plugin/test_out_copy.rb +1 -0
- data/test/plugin/test_out_exec.rb +1 -0
- data/test/plugin/test_out_exec_filter.rb +1 -0
- data/test/plugin/test_out_file.rb +36 -0
- data/test/plugin/test_out_forward.rb +279 -8
- data/test/plugin/test_out_roundrobin.rb +1 -0
- data/test/plugin/test_out_stdout.rb +1 -0
- data/test/plugin/test_out_stream.rb +1 -1
- data/test/test_buffer.rb +530 -0
- data/test/test_config.rb +1 -1
- data/test/test_configdsl.rb +1 -1
- data/test/test_formatter.rb +223 -0
- data/test/test_match.rb +1 -2
- data/test/test_mixin.rb +74 -2
- data/test/test_parser.rb +7 -1
- metadata +88 -35
- data/lib/fluent/plugin/buf_zfile.rb +0 -75
- data/spec/config/config_parser_spec.rb +0 -314
- data/spec/config/configurable_spec.rb +0 -524
- data/spec/config/configure_proxy_spec.rb +0 -96
- data/spec/config/dsl_spec.rb +0 -239
- data/spec/config/helper.rb +0 -49
- data/spec/config/literal_parser_spec.rb +0 -222
- data/spec/config/section_spec.rb +0 -97
- data/spec/config/system_config_spec.rb +0 -49
- data/spec/spec_helper.rb +0 -60
data/test/test_buffer.rb
ADDED
@@ -0,0 +1,530 @@
|
|
1
|
+
require 'helper'
|
2
|
+
require 'fluent/test'
|
3
|
+
require 'fluent/buffer'
|
4
|
+
|
5
|
+
require 'stringio'
|
6
|
+
require 'msgpack'
|
7
|
+
|
8
|
+
module FluentBufferTest
|
9
|
+
class BufferTest < Test::Unit::TestCase
  # Fluent::Buffer is an abstract base: lifecycle methods exist (and are
  # safe to call), while the data-handling methods are virtual and raise
  # NotImplementedError until a subclass provides them.
  def test_buffer_interface
    buffer = Fluent::Buffer.new

    # lifecycle methods are part of the public interface
    [:configure, :start, :shutdown, :before_shutdown].each do |lifecycle_method|
      assert buffer.respond_to?(lifecycle_method)
    end

    # virtual methods: present on the interface, but raise until overridden
    virtual_calls = {
      :emit   => lambda { buffer.emit('key', 'data', 'chain') },
      :keys   => lambda { buffer.keys },
      :push   => lambda { buffer.push('key') },
      :pop    => lambda { buffer.pop('out') },
      :clear! => lambda { buffer.clear! },
    }
    virtual_calls.each do |method_name, invocation|
      assert buffer.respond_to?(method_name)
      assert_raise(NotImplementedError) { invocation.call }
    end
  end

  # The base-class lifecycle methods are no-ops and must not raise.
  def test_buffer_does_nothing
    buffer = Fluent::Buffer.new

    buffer.start
    buffer.before_shutdown(nil) # out == nil
    buffer.shutdown
  end
end
|
39
|
+
|
40
|
+
# Minimal concrete BufferChunk used as a test double.
# It records only a byte count for appended data, exposes its backing
# string via an IO, and flags whether purge/close were ever invoked.
class DummyChunk < Fluent::BufferChunk
  attr_accessor :size, :data, :purged, :closed

  def initialize(key, size = 0)
    super(key)
    @size = size
  end

  # Appending just grows the recorded byte count; payload is discarded.
  def <<(appended)
    @size += appended.bytesize
  end

  # Expose @data through an IO-like object, as real chunks do with files.
  def open(&block)
    StringIO.open(@data, &block)
  end

  def purge
    @purged = true
  end

  def close
    @closed = true
  end
end
|
63
|
+
|
64
|
+
# Tests for Fluent::BufferChunk: key storage, the concrete helpers
# (empty?, write_to, msgpack_each) and the virtual-method contract.
class BufferChunkTest < Test::Unit::TestCase
  def test_has_key
    chunk = Fluent::BufferChunk.new('key')
    assert_equal 'key', chunk.key
  end

  def test_buffer_chunk_interface
    chunk = Fluent::BufferChunk.new('key')

    # concrete helpers implemented by the base class
    assert chunk.respond_to?(:empty?)
    assert chunk.respond_to?(:write_to)
    assert chunk.respond_to?(:msgpack_each)

    # virtual methods: defined on the interface, raise until overridden
    virtual_calls = {
      :<<    => lambda { chunk << 'data' },
      :size  => lambda { chunk.size },
      :close => lambda { chunk.close },
      :purge => lambda { chunk.purge },
      :read  => lambda { chunk.read },
      :open  => lambda { chunk.open },
    }
    virtual_calls.each do |method_name, invocation|
      assert chunk.respond_to?(method_name)
      assert_raise(NotImplementedError) { invocation.call }
    end
  end

  def test_empty?
    # empty? is derived from size
    dchunk = DummyChunk.new('key', 1)
    assert !(dchunk.empty?)

    dchunk.size = 0
    assert dchunk.empty?
  end

  def test_write_to
    dummy_chunk = DummyChunk.new('key')
    dummy_chunk.data = 'foo bar baz'

    dummy_dst = StringIO.new
    dummy_chunk.write_to(dummy_dst)
    assert_equal 'foo bar baz', dummy_dst.string
  end

  def test_msgpack_each
    dummy_chunk = DummyChunk.new('key')
    # concatenated stream of heterogeneous msgpack objects
    d0 = MessagePack.pack([[1, "foo"], [2, "bar"], [3, "baz"]])
    d1 = MessagePack.pack({"key1" => "value1", "key2" => "value2"})
    d2 = MessagePack.pack("string1")
    d3 = MessagePack.pack(1)
    d4 = MessagePack.pack(nil)

    dummy_chunk.data = d0 + d1 + d2 + d3 + d4

    store = []
    dummy_chunk.msgpack_each do |data|
      store << data
    end

    assert_equal 5, store.size
    assert_equal [[1, "foo"], [2, "bar"], [3, "baz"]], store[0]
    assert_equal({"key1" => "value1", "key2" => "value2"}, store[1])
    assert_equal "string1", store[2]
    assert_equal 1, store[3]
    # assert_nil instead of assert_equal(nil, ...): the latter is
    # deprecated in test-unit and warns/fails on newer versions.
    assert_nil store[4]
  end
end
|
133
|
+
|
134
|
+
# Concrete BasicBuffer test double: resumes with empty state, builds
# DummyChunk instances, and counts how often the enqueue hook fires.
class DummyBuffer < Fluent::BasicBuffer
  attr_accessor :queue, :map, :enqueue_hook_times

  def initialize
    super
    @queue = nil
    @map = nil
    @enqueue_hook_times = 0
  end

  # Nothing persisted: start from an empty queue and an empty key map.
  def resume
    [[], {}]
  end

  def new_chunk(key)
    DummyChunk.new(key)
  end

  # Hook invoked by BasicBuffer whenever a chunk moves into the queue.
  def enqueue(chunk)
    @enqueue_hook_times += 1
  end
end
|
156
|
+
|
157
|
+
# Stand-in for an emit chain: always signals success to the caller.
class DummyChain
  def next
    true
  end
end
|
162
|
+
|
163
|
+
# Tests for Fluent::BasicBuffer: configuration defaults, lifecycle,
# storability checks, and the emit/push/pop/clear! queueing behavior,
# exercised through the DummyBuffer/DummyChunk doubles above.
class BasicBufferTest < Test::Unit::TestCase
  def test_parallel_pop_default
    buffer = Fluent::BasicBuffer.new

    # parallel pop is on by default and can be toggled via enable_parallel
    assert buffer.instance_eval{ @parallel_pop }
    buffer.enable_parallel(false)
    assert !(buffer.instance_eval{ @parallel_pop })
    buffer.enable_parallel()
    assert buffer.instance_eval{ @parallel_pop }
  end

  def test_configure
    # defaults: 8MB chunks, 256-chunk queue
    with_defaults = Fluent::BasicBuffer.new
    with_defaults.configure({})
    assert_equal 8 * 1024 * 1024, with_defaults.buffer_chunk_limit
    assert_equal 256, with_defaults.buffer_queue_limit

    # explicit configuration overrides both limits
    customized = Fluent::BasicBuffer.new
    customized.configure({
      "buffer_chunk_limit" => 256 * 1024 * 1024,
      "buffer_queue_limit" => 16
    })
    assert_equal 256 * 1024 * 1024, customized.buffer_chunk_limit
    assert_equal 16, customized.buffer_queue_limit
  end

  def test_virtual_methods
    buffer = Fluent::BasicBuffer.new

    assert_raise(NotImplementedError){ buffer.new_chunk('key') }
    assert_raise(NotImplementedError){ buffer.resume }
    assert_raise(NotImplementedError){ buffer.enqueue('chunk') }
  end

  def test_start
    # start resumes into an empty queue and map (DummyBuffer#resume)
    buffer = DummyBuffer.new
    buffer.start
    assert_equal([], buffer.queue)
    assert_equal({}, buffer.map)
  end

  def test_shutdown
    # shutting down an empty buffer is harmless
    empty_buffer = DummyBuffer.new
    empty_buffer.start
    empty_buffer.shutdown

    loaded_buffer = DummyBuffer.new
    loaded_buffer.start

    chunks = [ DummyChunk.new('k1'), DummyChunk.new('k2'), DummyChunk.new('k3'), DummyChunk.new('k4') ]

    loaded_buffer.queue << chunks[0]
    loaded_buffer.queue << chunks[1]
    loaded_buffer.map = { 'k3' => chunks[2], 'k4' => chunks[3] }

    loaded_buffer.shutdown

    # both queued and not-yet-enqueued chunks get closed; queue is drained
    chunks.each do |chunk|
      assert chunk.closed
    end
    assert_equal 0, loaded_buffer.queue.size
  end

  def test_storable?
    buffer = DummyBuffer.new
    buffer.configure({})
    assert_equal 8 * 1024 * 1024, buffer.buffer_chunk_limit
    assert_equal 256, buffer.buffer_queue_limit

    # storable?(chunk, data): true while chunk.size + data.bytesize
    # stays within buffer_chunk_limit
    empty_chunk = DummyChunk.new('k', 0)
    almost_full_chunk = DummyChunk.new('k', 7 * 1024 * 1024)

    assert buffer.storable?(empty_chunk, 'b' * 1024 * 1024)
    assert buffer.storable?(empty_chunk, 'b' * 8 * 1024 * 1024)
    assert !(buffer.storable?(empty_chunk, 'b' * 9 * 1024 * 1024))

    assert buffer.storable?(almost_full_chunk, 'b' * 1024 * 1024)
    assert !(buffer.storable?(almost_full_chunk, 'b' * ( 1024 * 1024 + 1 ) ))
  end

  def test_emit
    db = DummyBuffer.new
    db.configure({})
    db.start

    chain = DummyChain.new

    assert_equal 8 * 1024 * 1024, db.buffer_chunk_limit
    assert_equal 256, db.buffer_queue_limit

    assert_equal 0, db.enqueue_hook_times

    s1m = "a" * 1024 * 1024 # one megabyte of payload

    # the trailing comments track cumulative chunk size (and post-enqueue size)
    d1 = s1m * 4
    d2 = s1m * 4 #=> 8
    d3 = s1m * 1 #=> 9, 1
    d4 = s1m * 6 #=> 7
    d5 = s1m * 2 #=> 9, 2
    d6 = s1m * 9 #=> 11, 9
    d7 = s1m * 9 #=> 18, 9
    d8 = s1m * 1 #=> 10, 1
    d9 = s1m * 2 #=> 3

    assert !(db.emit('key', d1, chain)) # stored into a fresh chunk; queue still empty
    assert !(db.map['key'].empty?)
    assert_equal 0, db.queue.size
    assert_equal 0, db.enqueue_hook_times

    assert !(db.emit('key', d2, chain)) # exactly storable, so nothing enqueued yet
    assert_equal 0, db.queue.size
    assert_equal 0, db.enqueue_hook_times

    assert db.emit('key', d3, chain) # not storable: old chunk enqueued, new 1m chunk, flush hinted
    assert_equal 1, db.queue.size
    assert_equal 1, db.enqueue_hook_times

    assert !(db.emit('key', d4, chain)) # fits into the current chunk
    assert_equal 1, db.queue.size
    assert_equal 1, db.enqueue_hook_times

    assert !(db.emit('key', d5, chain)) # not storable: old chunk enqueued, new 2m chunk
                                        # no flush hint (queue was not empty)
    assert_equal 2, db.queue.size
    assert_equal 2, db.enqueue_hook_times

    db.queue.reject!{|v| true } # flush

    assert db.emit('key', d6, chain) # not storable: old chunk enqueued
                                     # new chunk exceeds buffer_chunk_limit
                                     # flush hinted
    assert_equal 1, db.queue.size
    assert_equal 3, db.enqueue_hook_times

    assert !(db.emit('key', d7, chain)) # pre-emit chunk already over buffer_chunk_limit, so enqueued
                                        # no flush hint
    assert_equal 2, db.queue.size
    assert_equal 4, db.enqueue_hook_times

    db.queue.reject!{|v| true } # flush

    assert db.emit('key', d8, chain) # pre-emit chunk already over buffer_chunk_limit, so enqueued
                                     # flush hinted because queue was just drained
    assert_equal 1, db.queue_size
    assert_equal 5, db.enqueue_hook_times

    db.queue.reject!{|v| true } # flush

    assert !(db.emit('key', d9, chain)) # fits into the current chunk
    assert_equal 0, db.queue_size
    assert_equal 5, db.enqueue_hook_times
  end

  def test_keys
    db = DummyBuffer.new
    db.start

    chunks = [ DummyChunk.new('k1'), DummyChunk.new('k2'), DummyChunk.new('k3'), DummyChunk.new('k4') ]

    db.queue << chunks[0]
    db.queue << chunks[1]
    db.map = { 'k3' => chunks[2], 'k4' => chunks[3] }

    # keys reflects the map only, never queued chunks
    assert_equal ['k3', 'k4'], db.keys
  end

  def test_queue_size
    db = DummyBuffer.new
    db.start

    chunks = [ DummyChunk.new('k1'), DummyChunk.new('k2'), DummyChunk.new('k3'), DummyChunk.new('k4') ]

    db.queue << chunks[0]
    db.queue << chunks[1]
    db.map = { 'k3' => chunks[2], 'k4' => chunks[3] }

    # queue_size counts queued chunks only
    assert_equal 2, db.queue_size
  end

  def test_total_queued_chunk_size
    db = DummyBuffer.new
    db.start

    chunks = [ DummyChunk.new('k1', 1000), DummyChunk.new('k2', 2000), DummyChunk.new('k3', 3000), DummyChunk.new('k4', 4000) ]

    db.queue << chunks[0]
    db.queue << chunks[1]
    db.map = { 'k3' => chunks[2], 'k4' => chunks[3] }

    # sums byte sizes of queued AND mapped chunks
    assert_equal (1000 + 2000 + 3000 + 4000), db.total_queued_chunk_size
  end

  def test_push
    db = DummyBuffer.new
    db.start

    chunks = [ DummyChunk.new('k1', 1000), DummyChunk.new('k2', 2000), DummyChunk.new('k3', 3000), DummyChunk.new('k4', 4000) ]

    db.map = { 'k1' => chunks[0], 'k2' => chunks[1], 'k3' => chunks[2], 'k4' => chunks[3] }

    assert_equal 0, db.queue.size
    assert_equal 4, db.map.size

    # unknown key: push does nothing and returns false
    assert_nil db.map['k5']
    assert !(db.push('k5'))
    assert_equal 0, db.queue.size

    # empty chunk for the key: push does nothing and returns false
    empty_chunk = DummyChunk.new('key')
    db.map['k5'] = empty_chunk
    assert !(db.push('k5'))
    assert_equal empty_chunk, db.map['k5']

    # non-empty chunk: moved from map to queue (enqueue hook fires), true returned
    assert db.push('k3')
    assert_equal 1, db.queue.size
    assert_equal 3000, db.queue.first.size
    assert_nil db.map['k3']
    assert_equal 1, db.instance_eval{ @enqueue_hook_times }
  end

  # Output double: records the last chunk written and returns a sentinel
  # so tests can verify write_chunk propagates the output's return value.
  class DummyOutput
    attr_accessor :written

    def write(chunk)
      @written = chunk
      "return value"
    end
  end

  def test_pop
    ### pop(out)
    # 1. find a chunk that is not owned (by checking its monitor)
    # 2. return false if @queue is empty or all chunks are already owned
    # 3. call `write_chunk(chunk, out)` if the chunk isn't empty
    # 4. remove that chunk from @queue
    # 5. call `chunk.purge`
    # 6. return whether @queue still has chunks left

    db = DummyBuffer.new
    db.start
    out = DummyOutput.new

    assert !(db.pop(out)) # queue is empty
    assert_nil out.written

    c1 = DummyChunk.new('k1', 1)
    db.map = { 'k1' => c1 }
    db.push('k1')
    assert_equal 1, db.queue.size

    # hold c1's monitor on this thread, then pop from another thread:
    # the only queued chunk is owned, so pop must give up
    pop_return_value = nil
    c1.synchronize do
      pop_return_value = Thread.new {
        db.pop(out)
      }.value
    end
    assert !(pop_return_value)
    assert_equal 1, db.queue.size
    assert_nil out.written
    assert_nil c1.purged

    c2 = DummyChunk.new('k2', 1)
    db.map['k2'] = c2
    db.push('k2')
    assert_equal 2, db.queue.size

    # both queued chunks owned by other threads: pop still gives up
    pop_return_value = nil
    c1.synchronize do
      pop_return_value = Thread.new {
        c2.synchronize do
          Thread.new {
            db.pop(out)
          }.value
        end
      }.value
    end
    assert !(pop_return_value)
    assert_equal 2, db.queue.size
    assert_nil out.written
    assert_nil c1.purged
    assert_nil c2.purged

    c3 = DummyChunk.new('k3', 1)
    db.map['k3'] = c3
    db.push('k3')
    c4 = DummyChunk.new('k4', 1)
    db.map['k4'] = c4
    db.push('k4')
    assert_equal 4, db.queue.size

    # none of c1..c4 is empty and none is owned now
    queue_to_be_flushed_more = db.pop(out)
    assert queue_to_be_flushed_more # queue has more chunks
    assert c1.purged # the first chunk is shifted, written and purged
    assert_equal c1, out.written
    assert_equal 3, db.queue.size

    # with c3 owned, pop skips it and takes the next free chunk (c2)
    c3.synchronize do
      queue_to_be_flushed_more = Thread.new {
        db.pop(out)
      }.value
    end
    assert queue_to_be_flushed_more # c3, c4 still in queue
    assert c2.purged
    assert_equal c2, out.written
    assert_equal 2, db.queue.size

    # again with c3 owned: c4 is taken
    c3.synchronize do
      queue_to_be_flushed_more = Thread.new {
        db.pop(out)
      }.value
    end
    assert queue_to_be_flushed_more # c3 still in queue
    assert c4.purged
    assert_equal c4, out.written
    assert_equal 1, db.queue.size

    # finally c3 is free and the queue empties out
    queue_to_be_flushed_more = db.pop(out)
    assert c3.purged
    assert_equal c3, out.written
    assert_equal 0, db.queue.size
  end

  def test_write_chunk
    db = DummyBuffer.new
    db.start

    chunk = DummyChunk.new('k1', 1)
    out = DummyOutput.new

    # write_chunk delegates to out.write and passes its return value through
    assert_equal "return value", db.write_chunk(chunk, out)
    assert_equal chunk, out.written
  end

  def test_clear!
    db = DummyBuffer.new
    db.start

    keys = (1..5).map{ |i| "c_#{i}" }
    chunks = keys.map{ |k| DummyChunk.new(k, 1) }
    db.map = Hash[ [keys, chunks].transpose ]

    assert_equal 5, db.map.size
    assert_equal 0, db.queue.size

    # clear! drops queued chunks only; mapped (not yet enqueued) ones survive
    db.clear!
    assert_equal 5, db.map.size
    assert_equal 0, db.queue.size

    keys.each do |k|
      db.push(k)
    end
    assert_equal 0, db.map.size
    assert_equal 5, db.queue.size

    db.clear!
    assert_equal 0, db.map.size
    assert_equal 0, db.queue.size

    # every cleared chunk was purged
    assert chunks.all?(&:purged)
  end
end
|
530
|
+
end
|