fluent-plugin-buffer-event_limited 0.1.4 → 0.1.5
- checksums.yaml +4 -4
- data/README.md +3 -7
- data/fluent-plugin-buffer-event_limited.gemspec +2 -1
- data/lib/fluent/plugin/buf_event_limited.rb +97 -33
- data/test/plugin/test_buf_event_limited.rb +73 -80
- metadata +16 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9ab9aa64fad78b7f58fb453867cee20b4c87b7fa
+  data.tar.gz: 5443046e647187e4bf5ee215bcb925ddc6a50092
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 88483e3ca720d2518657c686b5fa5a0a3c3cacb8689e302725d628d08d557ea0efcec28b85c6ad1c70430c154cc702fc9766c05bfcd662249877c9d9c1c5c218
+  data.tar.gz: 653ab305d9e8c33fd4898494c5f8bb1c96ee14b66f2df9d463101cf8ac4183a7caa7f188e213f57af462266e9ecbe7e3b0b02e0578a24fd1c5b14adb05de5d50
data/README.md
CHANGED
@@ -2,10 +2,9 @@
 
 This gem is a mutation of the [fluent-plugin-buffer-lightening](https://github.com/tagomoris/fluent-plugin-buffer-lightening) buffer plugin by [tagomoris](https://github.com/tagomoris).
 
-
-
-
-These options are to decrease latency from emit to write, and to control chunk sizes and flush sizes.
+* The buffer is able to limit the number of events that are buffered in a buffer chunk.
+* The buffer only supports output plugins that return msgpack from their `#format` methods.
+* The buffer doesn't check the bytesize of the buffers just the number of messages
 
 ## Installation
 
@@ -33,9 +32,6 @@ Options of `buffer_type file` are also available:
     buffer_type event_limited
     buffer_chunk_limit 10M
     buffer_chunk_records_limit 100
-    buffer_chunk_message_separator newline
-    # buffer_chunk_message_separator tab
-    # buffer_chunk_message_separator msgpack
     # other options...
   </match>
 ```
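The second added bullet is the new contract this release enforces: a chunk's contents must be a raw msgpack stream so the buffer can count records by unpacking it. A minimal sketch of a compatible `#format` (illustrative names and values, not code from this gem):

```ruby
require 'msgpack'
require 'stringio'

# Hypothetical output-plugin formatter: pack each event as exactly one
# msgpack object, so the buffer can count records by unpacking the chunk.
def format(tag, time, record)
  [tag, time, record].to_msgpack
end

packed = format('app.log', Time.now.to_i, { 'a' => 1 })
events = MessagePack::Unpacker.new(StringIO.new(packed)).each.to_a
events.size # => 1, one counted record per formatted event
```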
data/fluent-plugin-buffer-event_limited.gemspec
CHANGED
@@ -2,7 +2,7 @@
 
 Gem::Specification.new do |spec|
   spec.name          = "fluent-plugin-buffer-event_limited"
-  spec.version       = "0.1.4"
+  spec.version       = "0.1.5"
   spec.authors       = ["TAGOMORI Satoshi", 'Gergo Sulymosi']
   spec.email         = ["tagomoris@gmail.com", 'gergo.sulymosi@gmail.com']
   spec.description   = %q{Fluentd memory buffer plugin with many types of chunk limits}
@@ -18,5 +18,6 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "bundler", "~> 1.3"
   spec.add_development_dependency "rake"
   spec.add_development_dependency "test-unit"
+  spec.add_development_dependency "pry"
   spec.add_runtime_dependency "fluentd", ">= 0.10.42"
 end
data/lib/fluent/plugin/buf_event_limited.rb
CHANGED
@@ -1,39 +1,57 @@
 require 'fluent/plugin/buf_file'
+require 'stringio'
 
 module Fluent
-  class
-    attr_reader :
+  class MessagePackFormattedBufferData
+    attr_reader :data
+
+    def initialize(data)
+      @data = data.to_str.freeze
+    end
 
-    def
-
-      init_counter(separator)
+    def records
+      @records ||= (data.empty? ? [] : unpack(data)).freeze
     end
 
-    def
-
-
+    def as_events
+      records.dup
+    end
 
-
+    def size
+      @size ||= records.size
     end
 
+    alias_method :to_str, :data
+    alias_method :as_msg_pack, :data
+
     private
 
-    def
-
-
-
-
-
-
-
-
-
-
-
-
+    def unpack(data)
+      MessagePack::Unpacker.new(StringIO.new(data)).each.to_a
+    end
+  end
+
+  class EventLimitedBufferChunk < FileBufferChunk
+    attr_reader :record_count, :limit
+
+    def initialize(key, path, unique_id, limit, mode = "a+")
+      super(key, path, unique_id, mode = "a+")
+      @limit = limit
+      @record_count = MessagePackFormattedBufferData.new(read).size
+    end
+
+    def <<(data, record_count)
+      super(data)
+      @record_count += record_count
+    end
+    alias_method :write, :<<
+
+    def remaining_capacity
+      @limit - record_count
+    end
 
-
-
+    def full?
+      record_count >= limit
     end
   end
 
@@ -41,11 +59,35 @@ module Fluent
     Fluent::Plugin.register_buffer('event_limited', self)
 
     config_param :buffer_chunk_records_limit, :integer, :default => Float::INFINITY
-    config_param :buffer_chunk_message_separator, :string, :default => 'msgpack'
 
-    def
-
-
+    def emit(key, data, chain)
+      data = MessagePackFormattedBufferData.new(data)
+      key = key.to_s
+      flush_trigger = false
+
+      synchronize do
+        # Get the active chunk if it exists
+        chunk = (@map[key] ||= new_chunk(key))
+
+        # Partition the data into chunks that can be written into new chunks
+        events = data.as_events
+        [
+          events.shift(chunk.remaining_capacity),
+          *events.each_slice(@buffer_chunk_records_limit)
+        ].each do |event_group|
+          chunk, queue_size = rotate_chunk!(chunk, key)
+          # Trigger flush only when we put the first chunk into it
+          flush_trigger ||= (queue_size == 0)
+
+          chain.next
+          chunk.write(
+            event_group.map { |d| MessagePack.pack(d) }.join(''),
+            event_group.size
+          )
+        end
+
+        return flush_trigger
+      end
     end
 
     def new_chunk(key)
@@ -53,7 +95,7 @@ module Fluent
       path, tsuffix = make_path(encoded_key, 'b')
       unique_id = tsuffix_to_unique_id(tsuffix)
 
-
+      chunk_factory(key, path, unique_id, 'a+')
     end
 
     # Copied here from
@@ -74,11 +116,9 @@ module Fluent
 
       case bq
       when 'b'
-
-        maps << [timestamp, chunk]
+        maps << [timestamp, chunk_factory(key, path, unique_id, 'a+')]
       when 'q'
-
-        queues << [timestamp, chunk]
+        queues << [timestamp, chunk_factory(key, path, unique_id, 'r')]
       end
     end
 
@@ -93,5 +133,29 @@ module Fluent
 
      return queue, map
    end
+
+    private
+
+    def rotate_chunk!(chunk, key)
+      queue_size = nil
+      return chunk unless chunk.full?
+
+      @queue.synchronize do
+        queue_size = @queue.size
+        enqueue(chunk) # this is buffer enqueue *hook*
+        @queue << chunk
+        chunk = (@map[key] = new_chunk(key))
+      end
+
+      return chunk, queue_size
+    end
+
+    def storable?(chunk, data)
+      (chunk.record_count + data.size) <= @buffer_chunk_records_limit
+    end
+
+    def chunk_factory(key, path, uniq_id, mode)
+      EventLimitedBufferChunk.new(key, path, uniq_id, @buffer_chunk_records_limit, mode)
+    end
   end
 end
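The heart of the new `#emit` is its partitioning expression: `shift` tops up whatever capacity the active chunk has left, and `each_slice` cuts the remainder into limit-sized groups, each of which forces a chunk rotation. A standalone sketch of that arithmetic (the values are illustrative, not the plugin's API):

```ruby
# 21 incoming events against a per-chunk limit of 10, active chunk empty.
limit              = 10
remaining_capacity = 10
events             = (1..21).to_a

groups = [
  events.shift(remaining_capacity),  # fills the active chunk first
  *events.each_slice(limit)          # the rest, one group per new chunk
]
p groups.map(&:size) # => [10, 10, 1]
```

This is exactly the shape the new tests assert below: two full chunks of 10 records are queued, and the single leftover event lands in a fresh chunk.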
data/test/plugin/test_buf_event_limited.rb
CHANGED
@@ -29,7 +29,6 @@ class EventLimitedFileBufferTest < Test::Unit::TestCase
       flush_interval 0.1
       try_flush_interval 0.03
       buffer_chunk_records_limit 10
-      buffer_chunk_message_separator newline
       buffer_path #{@buffer_path}
     ]
   end
@@ -43,7 +42,6 @@ class EventLimitedFileBufferTest < Test::Unit::TestCase
   def create_buffer_with_attributes(config = {})
     config = {
       'buffer_path' => @buffer_path,
-      'buffer_chunk_message_separator' => 'newline'
     }.merge(config)
     buf = Fluent::EventLimitedFileBuffer.new
     Fluent::EventLimitedFileBuffer.send(:class_variable_set, :'@@buffer_paths', {})
@@ -62,124 +60,123 @@ class EventLimitedFileBufferTest < Test::Unit::TestCase
     assert_equal 0.1, output.flush_interval
     assert_equal 0.03, output.try_flush_interval
     assert_equal 10, buffer.buffer_chunk_records_limit
-    assert_equal 'newline', buffer.buffer_chunk_message_separator
   end
 
   def test_emit
     d = create_driver
-
     buffer = d.instance.instance_variable_get(:@buffer)
-
-    buffer.start
+    count_buffer_events = -> { buffer.instance_variable_get(:@map)[''].record_count }
 
-
+    buffer.start
+    assert_nil buffer.instance_variable_get(:@map)[''], "No chunks on start"
 
     d.emit({"a" => 1})
-    assert_equal 1, buffer.instance_variable_get(:@map)[''].record_counter
+    assert_equal 1, count_buffer_events.call
 
-
-
-    d.emit({"a" => 8});
-    assert_equal 8, buffer.instance_variable_get(:@map)[''].record_counter
+    (2..9).each { |i| d.emit({"a" => i}) }
+    assert_equal 9, count_buffer_events.call
 
     chain = DummyChain.new
     tag = d.instance.instance_variable_get(:@tag)
     time = Time.now.to_i
 
     # flush_trigger false
-    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 9}), chain)
-    assert_equal 9, buffer.instance_variable_get(:@map)[''].record_counter
-
-    # flush_trigger false
-    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 10}), chain)
-    assert_equal 10, buffer.instance_variable_get(:@map)[''].record_counter
+    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 10}), chain), "Shouldn't trigger flush"
+    assert_equal 10, count_buffer_events.call
 
     # flush_trigger true
-    assert buffer.emit(tag, d.instance.format(tag, time, {"a" => 11}), chain)
-    assert_equal 1, buffer.instance_variable_get(:@map)[''].record_counter
+    assert buffer.emit(tag, d.instance.format(tag, time, {"a" => 11}), chain), "Should trigger flush"
+    assert_equal 1, count_buffer_events.call # new chunk
 
     # flush_trigger false
-    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 12}), chain)
-    assert_equal 2, buffer.instance_variable_get(:@map)[''].record_counter
+    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 12}), chain), "Shouldn't trigger flush"
+    assert_equal 2, count_buffer_events.call
   end
 
-  def
-
-
-
+  def test_emit_with_oversized_streams
+    d = create_driver
+    buffer = d.instance.instance_variable_get(:@buffer)
+    chain = DummyChain.new
+    tag = d.instance.instance_variable_get(:@tag)
+    time = Time.now.to_i
+    count_buffer_events = -> { buffer.instance_variable_get(:@map)[''].record_count }
+    count_queued_buffers = -> { buffer.instance_variable_get(:@queue).size }
 
-
-    chunk1 = buf1.new_chunk('key1')
-    chunk2 = buf1.new_chunk('key2')
-    assert_equal 0, chunk1.record_counter
-    assert_equal 0, chunk2.record_counter
+    buffer.start
 
-
-
-
+    events = 21.times.map { |i| [time, {a: i}] }
+    event_stream = d.instance.format_stream(tag, events)
+    assert buffer.emit(tag, event_stream, chain), "Should trigger flush"
+    assert_equal 2, count_queued_buffers.call, "Data should fill up two buffers"
+    assert_equal 1, count_buffer_events.call, "Data should overflow into a new buffer"
+    assert buffer.instance_variable_get(:@queue).all? { |b| b.record_count == 10 }
+  end
 
-
-
-
-
-
-
-
-
-    buf1.shutdown
+  def test_emit_with_oversized_streams_and_ongoing_buffer_chunks
+    d = create_driver
+    buffer = d.instance.instance_variable_get(:@buffer)
+    chain = DummyChain.new
+    tag = d.instance.instance_variable_get(:@tag)
+    time = Time.now.to_i
+    count_buffer_events = -> { buffer.instance_variable_get(:@map)[''].record_count }
+    count_queued_buffers = -> { buffer.instance_variable_get(:@queue).size }
 
-
-    buf2, *_ = create_buffer_with_attributes
-    queue, map = buf2.resume
+    buffer.start
 
-
-
-
+    data_streams = [2, 21].map do |stream_size|
+      d.instance.format_stream(
+        tag,
+        stream_size.times.map { |i| [time, {a: i}] }
+      )
+    end
 
-
-
-    resumed_chunk2 = map.values.first
-    assert_equal chunk1.path, resumed_chunk1.path
-    assert_equal chunk2.path, resumed_chunk2.path
-    assert chunk1 != resumed_chunk1
-    assert chunk2 != resumed_chunk2
+    assert !buffer.emit(tag, data_streams[0], chain), "Should not trigger flush"
+    assert buffer.emit(tag, data_streams[1], chain), "Should trigger flush"
 
-
-    assert_equal
-
+    assert_equal 2, count_queued_buffers.call, "Data should fill up two buffers"
+    assert_equal 3, count_buffer_events.call, "Data should overflow into a new buffer"
+    assert buffer.instance_variable_get(:@queue).all? { |b| b.record_count == 10 }
+  end
+
+  def test_new_chunk
+    d = create_driver
+    buffer = d.instance.instance_variable_get(:@buffer)
 
-
-
+    chunk1 = buffer.new_chunk('')
+    chunk2 = buffer.new_chunk('')
 
-
-
+    assert chunk1 != chunk2
+    assert chunk1.path != chunk2.path
   end
 
   def test_resume_from_msgpack_chunks
+    d = create_driver
+    events = 2.times.map { |i| [Time.now.to_i, {a: i}] }
+    event_stream = d.instance.format_stream('test', events)
     # Setup buffer to test chunks
-    buf1, prefix, suffix = create_buffer_with_attributes 
+    buf1, prefix, suffix = create_buffer_with_attributes
     buf1.start
 
     # Create chunks to test
     chunk1 = buf1.new_chunk('key1')
     chunk2 = buf1.new_chunk('key2')
-    assert_equal 0, chunk1.record_counter
-    assert_equal 0, chunk2.record_counter
+    assert_equal 0, chunk1.record_count
+    assert_equal 0, chunk2.record_count
 
     # Write data into chunks
-    chunk1 << MessagePack.pack('data1')
-    chunk1 << MessagePack.pack('data2')
-    chunk2 << MessagePack.pack('data3')
-    chunk2 << MessagePack.pack('data4')
+    chunk1.write(event_stream, 2)
+    chunk2.write(event_stream, 2)
 
     # Enqueue chunk1 and leave chunk2 open
     buf1.enqueue(chunk1)
-    assert \
+    assert(
       chunk1.path =~ /\A#{prefix}[-_.a-zA-Z0-9\%]+\.q[0-9a-f]+#{suffix}\Z/,
       "chunk1 must be enqueued"
-    assert \
+    )
+    assert(
       chunk2.path =~ /\A#{prefix}[-_.a-zA-Z0-9\%]+\.b[0-9a-f]+#{suffix}\Z/,
       "chunk2 is not enqueued yet"
+    )
     buf1.shutdown
 
     # Setup a new buffer to test resume
@@ -202,14 +199,10 @@ class EventLimitedFileBufferTest < Test::Unit::TestCase
     assert_equal Fluent::EventLimitedBufferChunk, resumed_chunk1.class
     assert_equal Fluent::EventLimitedBufferChunk, resumed_chunk2.class
 
-    assert_equal \
-      MessagePack.pack('data1') + MessagePack.pack('data2'),
-      resumed_chunk1.read
-    assert_equal \
-      MessagePack.pack('data3') + MessagePack.pack('data4'),
-      resumed_chunk2.read
+    assert_equal event_stream, resumed_chunk1.read
+    assert_equal event_stream, resumed_chunk2.read
 
-    assert_equal 2, resumed_chunk1.record_counter
-    assert_equal 2, resumed_chunk2.record_counter
+    assert_equal 2, resumed_chunk1.record_count
+    assert_equal 2, resumed_chunk2.record_count
   end
 end
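The resume assertions above lean on the counting scheme introduced in `EventLimitedBufferChunk#initialize`: a reopened chunk recovers its record count by unpacking the file's msgpack stream. A small sketch of that round trip (illustrative data, not the gem's API):

```ruby
require 'msgpack'
require 'stringio'

# Two events packed back to back, the way a chunk file stores them.
data    = 2.times.map { |i| { 'a' => i }.to_msgpack }.join
records = MessagePack::Unpacker.new(StringIO.new(data)).each.to_a
records.size # => 2, matching the `record_count` of the resumed chunks
```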
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-buffer-event_limited
 version: !ruby/object:Gem::Version
-  version: 0.1.4
+  version: 0.1.5
 platform: ruby
 authors:
 - TAGOMORI Satoshi
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-11-
+date: 2015-11-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -53,6 +53,20 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: pry
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: fluentd
   requirement: !ruby/object:Gem::Requirement