fluent-plugin-buffer-event_limited 0.1.5 → 0.1.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 9ab9aa64fad78b7f58fb453867cee20b4c87b7fa
-  data.tar.gz: 5443046e647187e4bf5ee215bcb925ddc6a50092
+  metadata.gz: 6dac5d961204427f1720db8a996c381430953103
+  data.tar.gz: f17424fa7ae74a0bb472f2f5b4bc0141d4a82670
 SHA512:
-  metadata.gz: 88483e3ca720d2518657c686b5fa5a0a3c3cacb8689e302725d628d08d557ea0efcec28b85c6ad1c70430c154cc702fc9766c05bfcd662249877c9d9c1c5c218
-  data.tar.gz: 653ab305d9e8c33fd4898494c5f8bb1c96ee14b66f2df9d463101cf8ac4183a7caa7f188e213f57af462266e9ecbe7e3b0b02e0578a24fd1c5b14adb05de5d50
+  metadata.gz: 78739c15f9b891f7131fa018f07bbecda52010522810434941b5c90d557afa98ad2bcf710dc5e060c823199b8fd69289ca78e9598002e0b9c22af12f9a2e14b3
+  data.tar.gz: 7437256f6949ee8c5ebcc040cac28ac6a54316794c7f8dbeec0a6af1af3b23231718cfa04e0a4d3fe9261313240d56be3a57d2f2f2f7d8f39adbeabd870c749d
fluent-plugin-buffer-event_limited.gemspec CHANGED
@@ -2,11 +2,11 @@
 
 Gem::Specification.new do |spec|
   spec.name = "fluent-plugin-buffer-event_limited"
-  spec.version = "0.1.5"
-  spec.authors = ["TAGOMORI Satoshi", 'Gergo Sulymosi']
-  spec.email = ["tagomoris@gmail.com", 'gergo.sulymosi@gmail.com']
-  spec.description = %q{Fluentd memory buffer plugin with many types of chunk limits}
-  spec.summary = %q{Alternative file buffer plugin for Fluentd to realize less delay}
+  spec.version = "0.1.6"
+  spec.authors = ['Gergo Sulymosi']
+  spec.email = ['gergo.sulymosi@gmail.com']
+  spec.description = %{Fluentd memory buffer plugin with many types of chunk limits}
+  spec.summary = %{Alternative file buffer plugin for Fluentd to limit events in a buffer, not its size}
   spec.homepage = "https://github.com/trekdemo/fluent-plugin-buffer-event_limited"
   spec.license = "APLv2"
 
@@ -9,16 +9,30 @@ module Fluent
       @data = data.to_str.freeze
     end
 
-    def records
-      @records ||= (data.empty? ? [] : unpack(data)).freeze
-    end
+    # Partition the data into required sizes
+    def each_slice(target_sizes)
+      target_size = target_sizes.next
+      slice_size = 0
+      slice_data = ''
+
+      reader.each do |event|
+        if slice_size == target_size
+          yield(slice_data, slice_size)
+
+          target_size = target_sizes.next
+          slice_size = 0
+          slice_data = ''
+        end
+
+        slice_data << pack(event)
+        slice_size += 1
+      end
 
-    def as_events
-      records.dup
+      yield(slice_data, slice_size)
     end
 
     def size
-      @size ||= records.size
+      @size ||= reader.each.reduce(0) { |c, _| c + 1 }
     end
 
     alias_method :to_str, :data
@@ -26,8 +40,12 @@ module Fluent
 
     private
 
-    def unpack(data)
-      MessagePack::Unpacker.new(StringIO.new(data)).each.to_a
+    def pack(event)
+      MessagePack.pack(event)
+    end
+
+    def reader
+      @reader ||= MessagePack::Unpacker.new(StringIO.new(data))
     end
   end
 
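To see what the new streaming each_slice does, here is a minimal standalone sketch. The free-standing each_slice method, the sample events, and the size list are all made up for illustration, but the slicing logic mirrors the method above:

require 'msgpack'
require 'stringio'

# Hypothetical free-standing version of MessagePackFormattedBufferData#each_slice:
# walk a MessagePack stream and cut it at the sizes produced by the enumerator,
# yielding each packed slice together with its record count.
def each_slice(data, target_sizes)
  target_size = target_sizes.next
  slice_size = 0
  slice_data = ''

  MessagePack::Unpacker.new(StringIO.new(data)).each do |event|
    if slice_size == target_size
      yield(slice_data, slice_size)
      target_size = target_sizes.next
      slice_size = 0
      slice_data = ''
    end
    slice_data << MessagePack.pack(event)
    slice_size += 1
  end

  yield(slice_data, slice_size) # flush the final, possibly partial slice
end

events = (1..7).map { |i| MessagePack.pack('a' => i) }.join
sizes = [2, 3, 3].each # 2 tops up the open chunk, then full chunks of 3
each_slice(events, sizes) { |_data, size| puts "slice with #{size} event(s)" }
# prints slices of 2, 3, and 2 (the leftover) events

Note that unlike the old records/as_events pair, nothing materializes the whole event array; events stream through the unpacker one at a time.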
@@ -60,44 +78,6 @@ module Fluent
 
     config_param :buffer_chunk_records_limit, :integer, :default => Float::INFINITY
 
-    def emit(key, data, chain)
-      data = MessagePackFormattedBufferData.new(data)
-      key = key.to_s
-      flush_trigger = false
-
-      synchronize do
-        # Get the active chunk if it exists
-        chunk = (@map[key] ||= new_chunk(key))
-
-        # Partition the data into chunks that can be written into new chunks
-        events = data.as_events
-        [
-          events.shift(chunk.remaining_capacity),
-          *events.each_slice(@buffer_chunk_records_limit)
-        ].each do |event_group|
-          chunk, queue_size = rotate_chunk!(chunk, key)
-          # Trigger flush only when we put the first chunk into it
-          flush_trigger ||= (queue_size == 0)
-
-          chain.next
-          chunk.write(
-            event_group.map { |d| MessagePack.pack(d) }.join(''),
-            event_group.size
-          )
-        end
-
-        return flush_trigger
-      end
-    end
-
-    def new_chunk(key)
-      encoded_key = encode_key(key)
-      path, tsuffix = make_path(encoded_key, 'b')
-      unique_id = tsuffix_to_unique_id(tsuffix)
-
-      chunk_factory(key, path, unique_id, 'a+')
-    end
-
     # Copied here from
     # https://github.com/fluent/fluentd/blob/d3ae305b6e7521fafac6ad30c6b0a8763c363b65/lib/fluent/plugin/buf_file.rb#L128-L165
     def resume
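For reference, buffer_chunk_records_limit is the setting this plugin adds on top of Fluentd's file buffer: a chunk is rotated once it holds that many events, regardless of its byte size. A minimal, illustrative match section might look like the following; the output type is a placeholder, and the event_limited buffer type name is assumed from the plugin name rather than confirmed by this diff:

<match app.**>
  type your_buffered_output          # placeholder for any buffered output plugin
  buffer_type event_limited          # use this plugin as the buffer
  buffer_chunk_records_limit 100     # rotate a chunk after 100 events
  buffer_path /var/log/fluent/app.*.buffer
</match>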
@@ -134,8 +114,40 @@ module Fluent
       return queue, map
     end
 
+    def emit(key, data, chain)
+      data = MessagePackFormattedBufferData.new(data)
+      key = key.to_s
+      flush_trigger = false
+
+      synchronize do
+        # Get the current open chunk
+        chunk = (@map[key] ||= new_chunk(key))
+
+        data.each_slice(chunk_sizes(chunk.remaining_capacity)) do |data, size|
+          chain.next
+          chunk.write(data, size)
+          chunk, queue_size = rotate_chunk!(chunk, key)
+          flush_trigger ||= (queue_size == 0)
+        end
+      end
+
+      return flush_trigger
+    end
+
+    def new_chunk(key)
+      encoded_key = encode_key(key)
+      path, tsuffix = make_path(encoded_key, 'b')
+      unique_id = tsuffix_to_unique_id(tsuffix)
+
+      chunk_factory(key, path, unique_id, 'a+')
+    end
+
     private
 
+    def chunk_factory(key, path, uniq_id, mode)
+      EventLimitedBufferChunk.new(key, path, uniq_id, @buffer_chunk_records_limit, mode)
+    end
+
     def rotate_chunk!(chunk, key)
       queue_size = nil
       return chunk unless chunk.full?
@@ -150,12 +162,15 @@ module Fluent
       return chunk, queue_size
     end
 
-    def storable?(chunk, data)
-      (chunk.record_count + data.size) <= @buffer_chunk_records_limit
-    end
-
-    def chunk_factory(key, path, uniq_id, mode)
-      EventLimitedBufferChunk.new(key, path, uniq_id, @buffer_chunk_records_limit, mode)
+    # Generates an infinite sequence with an initial value followed by the
+    # chunk limit
+    #
+    # E.g.: [2, 5, 5, 5, 5, 5, ...]
+    def chunk_sizes(initial_size)
+      Enumerator.new do |y|
+        y << initial_size
+        y << @buffer_chunk_records_limit while true
+      end
     end
   end
 end
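The interplay with emit is that the enumerator is seeded with the open chunk's remaining capacity, so the first slice tops up that chunk and every later slice fills a fresh chunk to the limit. A standalone sketch, with a hard-coded limit of 5 standing in for @buffer_chunk_records_limit:

limit = 5

# Endless enumerator: the remaining capacity first, then the limit forever.
chunk_sizes = lambda do |initial_size|
  Enumerator.new do |y|
    y << initial_size
    y << limit while true
  end
end

p chunk_sizes.call(2).take(6) # => [2, 5, 5, 5, 5, 5]
# Feeding 9 events through each_slice with these sizes would
# yield slices of 2, 5, and 2 events.

Because Enumerator is lazy, the endless loop is harmless: each_slice only pulls the next size when it finishes a slice.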
test/plugin/test_buf_event_limited.rb CHANGED
@@ -73,24 +73,24 @@ class EventLimitedFileBufferTest < Test::Unit::TestCase
     d.emit({"a" => 1})
     assert_equal 1, count_buffer_events.call
 
-    (2..9).each { |i| d.emit({"a" => i}) }
-    assert_equal 9, count_buffer_events.call
+    (2..8).each { |i| d.emit({"a" => i}) }
+    assert_equal 8, count_buffer_events.call
 
     chain = DummyChain.new
     tag = d.instance.instance_variable_get(:@tag)
     time = Time.now.to_i
 
     # flush_trigger false
-    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 10}), chain), "Shouldn't trigger flush"
-    assert_equal 10, count_buffer_events.call
+    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 9}), chain), "Shouldn't trigger flush"
+    assert_equal 9, count_buffer_events.call
 
     # flush_trigger true
-    assert buffer.emit(tag, d.instance.format(tag, time, {"a" => 11}), chain), "Should trigger flush"
-    assert_equal 1, count_buffer_events.call # new chunk
+    assert buffer.emit(tag, d.instance.format(tag, time, {"a" => 10}), chain), "Should trigger flush"
+    assert_equal 0, count_buffer_events.call # new chunk
 
     # flush_trigger false
-    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 12}), chain), "Shouldn't trigger flush"
-    assert_equal 2, count_buffer_events.call
+    assert !buffer.emit(tag, d.instance.format(tag, time, {"a" => 11}), chain), "Shouldn't trigger flush"
+    assert_equal 1, count_buffer_events.call
   end
 
   def test_emit_with_oversized_streams
metadata CHANGED
@@ -1,10 +1,9 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-buffer-event_limited
 version: !ruby/object:Gem::Version
-  version: 0.1.5
+  version: 0.1.6
 platform: ruby
 authors:
-- TAGOMORI Satoshi
 - Gergo Sulymosi
 autorequire:
 bindir: bin
@@ -83,7 +82,6 @@ dependencies:
     version: 0.10.42
 description: Fluentd memory buffer plugin with many types of chunk limits
 email:
-- tagomoris@gmail.com
- gergo.sulymosi@gmail.com
 executables: []
 extensions: []
@@ -124,7 +122,8 @@ rubyforge_project:
 rubygems_version: 2.4.5.1
 signing_key:
 specification_version: 4
-summary: Alternative file buffer plugin for Fluentd to realize less delay
+summary: Alternative file buffer plugin for Fluentd to limit events in a buffer, not
+  its size
 test_files:
 - test/plugin/dummy_chain.rb
 - test/plugin/test_buf_event_limited.rb