fluentd 1.17.0-x86-mingw32 → 1.17.1-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +46 -0
- data/README.md +1 -0
- data/SECURITY.md +2 -2
- data/fluent.conf +14 -14
- data/lib/fluent/command/cap_ctl.rb +4 -4
- data/lib/fluent/compat/call_super_mixin.rb +3 -3
- data/lib/fluent/compat/propagate_default.rb +4 -4
- data/lib/fluent/config/yaml_parser/parser.rb +4 -0
- data/lib/fluent/log/console_adapter.rb +4 -2
- data/lib/fluent/plugin/in_exec.rb +14 -2
- data/lib/fluent/plugin/in_http.rb +1 -1
- data/lib/fluent/plugin/in_sample.rb +13 -7
- data/lib/fluent/plugin/in_tail.rb +65 -23
- data/lib/fluent/plugin/out_copy.rb +1 -1
- data/lib/fluent/plugin/out_file.rb +8 -0
- data/lib/fluent/plugin/out_http.rb +12 -0
- data/lib/fluent/plugin/parser_json.rb +4 -12
- data/lib/fluent/plugin_helper/http_server/server.rb +1 -1
- data/lib/fluent/version.rb +1 -1
- data/templates/new_gem/fluent-plugin.gemspec.erb +6 -5
- metadata +25 -472
- data/.github/DISCUSSION_TEMPLATE/q-a-japanese.yml +0 -50
- data/.github/DISCUSSION_TEMPLATE/q-a.yml +0 -47
- data/.github/ISSUE_TEMPLATE/bug_report.yml +0 -71
- data/.github/ISSUE_TEMPLATE/config.yml +0 -5
- data/.github/ISSUE_TEMPLATE/feature_request.yml +0 -39
- data/.github/ISSUE_TEMPLATE.md +0 -17
- data/.github/PULL_REQUEST_TEMPLATE.md +0 -14
- data/.github/workflows/stale-actions.yml +0 -24
- data/.github/workflows/test-ruby-head.yml +0 -31
- data/.github/workflows/test.yml +0 -32
- data/.gitignore +0 -30
- data/Gemfile +0 -9
- data/fluentd.gemspec +0 -62
- data/test/command/test_binlog_reader.rb +0 -362
- data/test/command/test_ca_generate.rb +0 -70
- data/test/command/test_cap_ctl.rb +0 -100
- data/test/command/test_cat.rb +0 -128
- data/test/command/test_ctl.rb +0 -56
- data/test/command/test_fluentd.rb +0 -1291
- data/test/command/test_plugin_config_formatter.rb +0 -397
- data/test/command/test_plugin_generator.rb +0 -109
- data/test/compat/test_calls_super.rb +0 -166
- data/test/compat/test_parser.rb +0 -92
- data/test/config/assertions.rb +0 -42
- data/test/config/test_config_parser.rb +0 -551
- data/test/config/test_configurable.rb +0 -1784
- data/test/config/test_configure_proxy.rb +0 -604
- data/test/config/test_dsl.rb +0 -415
- data/test/config/test_element.rb +0 -518
- data/test/config/test_literal_parser.rb +0 -309
- data/test/config/test_plugin_configuration.rb +0 -56
- data/test/config/test_section.rb +0 -191
- data/test/config/test_system_config.rb +0 -195
- data/test/config/test_types.rb +0 -408
- data/test/counter/test_client.rb +0 -563
- data/test/counter/test_error.rb +0 -44
- data/test/counter/test_mutex_hash.rb +0 -179
- data/test/counter/test_server.rb +0 -589
- data/test/counter/test_store.rb +0 -258
- data/test/counter/test_validator.rb +0 -137
- data/test/helper.rb +0 -155
- data/test/helpers/fuzzy_assert.rb +0 -89
- data/test/helpers/process_extenstion.rb +0 -33
- data/test/log/test_console_adapter.rb +0 -117
- data/test/plugin/data/2010/01/20100102-030405.log +0 -0
- data/test/plugin/data/2010/01/20100102-030406.log +0 -0
- data/test/plugin/data/2010/01/20100102.log +0 -0
- data/test/plugin/data/log/bar +0 -0
- data/test/plugin/data/log/foo/bar.log +0 -0
- data/test/plugin/data/log/foo/bar2 +0 -0
- data/test/plugin/data/log/test.log +0 -0
- data/test/plugin/data/log_numeric/01.log +0 -0
- data/test/plugin/data/log_numeric/02.log +0 -0
- data/test/plugin/data/log_numeric/12.log +0 -0
- data/test/plugin/data/log_numeric/14.log +0 -0
- data/test/plugin/data/sd_file/config +0 -11
- data/test/plugin/data/sd_file/config.json +0 -17
- data/test/plugin/data/sd_file/config.yaml +0 -11
- data/test/plugin/data/sd_file/config.yml +0 -11
- data/test/plugin/data/sd_file/invalid_config.yml +0 -7
- data/test/plugin/in_tail/test_fifo.rb +0 -121
- data/test/plugin/in_tail/test_io_handler.rb +0 -150
- data/test/plugin/in_tail/test_position_file.rb +0 -346
- data/test/plugin/out_forward/test_ack_handler.rb +0 -140
- data/test/plugin/out_forward/test_connection_manager.rb +0 -145
- data/test/plugin/out_forward/test_handshake_protocol.rb +0 -112
- data/test/plugin/out_forward/test_load_balancer.rb +0 -106
- data/test/plugin/out_forward/test_socket_cache.rb +0 -174
- data/test/plugin/test_bare_output.rb +0 -131
- data/test/plugin/test_base.rb +0 -247
- data/test/plugin/test_buf_file.rb +0 -1314
- data/test/plugin/test_buf_file_single.rb +0 -898
- data/test/plugin/test_buf_memory.rb +0 -42
- data/test/plugin/test_buffer.rb +0 -1493
- data/test/plugin/test_buffer_chunk.rb +0 -209
- data/test/plugin/test_buffer_file_chunk.rb +0 -871
- data/test/plugin/test_buffer_file_single_chunk.rb +0 -611
- data/test/plugin/test_buffer_memory_chunk.rb +0 -339
- data/test/plugin/test_compressable.rb +0 -87
- data/test/plugin/test_file_util.rb +0 -96
- data/test/plugin/test_filter.rb +0 -368
- data/test/plugin/test_filter_grep.rb +0 -697
- data/test/plugin/test_filter_parser.rb +0 -731
- data/test/plugin/test_filter_record_transformer.rb +0 -577
- data/test/plugin/test_filter_stdout.rb +0 -207
- data/test/plugin/test_formatter_csv.rb +0 -136
- data/test/plugin/test_formatter_hash.rb +0 -38
- data/test/plugin/test_formatter_json.rb +0 -61
- data/test/plugin/test_formatter_ltsv.rb +0 -70
- data/test/plugin/test_formatter_msgpack.rb +0 -28
- data/test/plugin/test_formatter_out_file.rb +0 -116
- data/test/plugin/test_formatter_single_value.rb +0 -44
- data/test/plugin/test_formatter_tsv.rb +0 -76
- data/test/plugin/test_in_debug_agent.rb +0 -49
- data/test/plugin/test_in_exec.rb +0 -261
- data/test/plugin/test_in_forward.rb +0 -1178
- data/test/plugin/test_in_gc_stat.rb +0 -62
- data/test/plugin/test_in_http.rb +0 -1124
- data/test/plugin/test_in_monitor_agent.rb +0 -922
- data/test/plugin/test_in_object_space.rb +0 -66
- data/test/plugin/test_in_sample.rb +0 -190
- data/test/plugin/test_in_syslog.rb +0 -505
- data/test/plugin/test_in_tail.rb +0 -3429
- data/test/plugin/test_in_tcp.rb +0 -328
- data/test/plugin/test_in_udp.rb +0 -296
- data/test/plugin/test_in_unix.rb +0 -181
- data/test/plugin/test_input.rb +0 -137
- data/test/plugin/test_metadata.rb +0 -89
- data/test/plugin/test_metrics.rb +0 -294
- data/test/plugin/test_metrics_local.rb +0 -96
- data/test/plugin/test_multi_output.rb +0 -204
- data/test/plugin/test_out_copy.rb +0 -308
- data/test/plugin/test_out_exec.rb +0 -312
- data/test/plugin/test_out_exec_filter.rb +0 -606
- data/test/plugin/test_out_file.rb +0 -1038
- data/test/plugin/test_out_forward.rb +0 -1349
- data/test/plugin/test_out_http.rb +0 -557
- data/test/plugin/test_out_null.rb +0 -105
- data/test/plugin/test_out_relabel.rb +0 -28
- data/test/plugin/test_out_roundrobin.rb +0 -146
- data/test/plugin/test_out_secondary_file.rb +0 -458
- data/test/plugin/test_out_stdout.rb +0 -205
- data/test/plugin/test_out_stream.rb +0 -103
- data/test/plugin/test_output.rb +0 -1334
- data/test/plugin/test_output_as_buffered.rb +0 -2024
- data/test/plugin/test_output_as_buffered_backup.rb +0 -363
- data/test/plugin/test_output_as_buffered_compress.rb +0 -179
- data/test/plugin/test_output_as_buffered_overflow.rb +0 -250
- data/test/plugin/test_output_as_buffered_retries.rb +0 -966
- data/test/plugin/test_output_as_buffered_secondary.rb +0 -882
- data/test/plugin/test_output_as_standard.rb +0 -374
- data/test/plugin/test_owned_by.rb +0 -34
- data/test/plugin/test_parser.rb +0 -399
- data/test/plugin/test_parser_apache.rb +0 -42
- data/test/plugin/test_parser_apache2.rb +0 -47
- data/test/plugin/test_parser_apache_error.rb +0 -45
- data/test/plugin/test_parser_csv.rb +0 -200
- data/test/plugin/test_parser_json.rb +0 -244
- data/test/plugin/test_parser_labeled_tsv.rb +0 -160
- data/test/plugin/test_parser_msgpack.rb +0 -127
- data/test/plugin/test_parser_multiline.rb +0 -111
- data/test/plugin/test_parser_nginx.rb +0 -88
- data/test/plugin/test_parser_none.rb +0 -52
- data/test/plugin/test_parser_regexp.rb +0 -284
- data/test/plugin/test_parser_syslog.rb +0 -650
- data/test/plugin/test_parser_tsv.rb +0 -122
- data/test/plugin/test_sd_file.rb +0 -228
- data/test/plugin/test_sd_srv.rb +0 -230
- data/test/plugin/test_storage.rb +0 -166
- data/test/plugin/test_storage_local.rb +0 -335
- data/test/plugin/test_string_util.rb +0 -26
- data/test/plugin_helper/data/cert/cert-key.pem +0 -27
- data/test/plugin_helper/data/cert/cert-with-CRLF.pem +0 -19
- data/test/plugin_helper/data/cert/cert-with-no-newline.pem +0 -19
- data/test/plugin_helper/data/cert/cert.pem +0 -19
- data/test/plugin_helper/data/cert/cert_chains/ca-cert-key.pem +0 -27
- data/test/plugin_helper/data/cert/cert_chains/ca-cert.pem +0 -20
- data/test/plugin_helper/data/cert/cert_chains/cert-key.pem +0 -27
- data/test/plugin_helper/data/cert/cert_chains/cert.pem +0 -40
- data/test/plugin_helper/data/cert/empty.pem +0 -0
- data/test/plugin_helper/data/cert/generate_cert.rb +0 -125
- data/test/plugin_helper/data/cert/with_ca/ca-cert-key-pass.pem +0 -30
- data/test/plugin_helper/data/cert/with_ca/ca-cert-key.pem +0 -27
- data/test/plugin_helper/data/cert/with_ca/ca-cert-pass.pem +0 -20
- data/test/plugin_helper/data/cert/with_ca/ca-cert.pem +0 -20
- data/test/plugin_helper/data/cert/with_ca/cert-key-pass.pem +0 -30
- data/test/plugin_helper/data/cert/with_ca/cert-key.pem +0 -27
- data/test/plugin_helper/data/cert/with_ca/cert-pass.pem +0 -21
- data/test/plugin_helper/data/cert/with_ca/cert.pem +0 -21
- data/test/plugin_helper/data/cert/without_ca/cert-key-pass.pem +0 -30
- data/test/plugin_helper/data/cert/without_ca/cert-key.pem +0 -27
- data/test/plugin_helper/data/cert/without_ca/cert-pass.pem +0 -20
- data/test/plugin_helper/data/cert/without_ca/cert.pem +0 -20
- data/test/plugin_helper/http_server/test_app.rb +0 -65
- data/test/plugin_helper/http_server/test_route.rb +0 -32
- data/test/plugin_helper/service_discovery/test_manager.rb +0 -93
- data/test/plugin_helper/service_discovery/test_round_robin_balancer.rb +0 -21
- data/test/plugin_helper/test_cert_option.rb +0 -25
- data/test/plugin_helper/test_child_process.rb +0 -862
- data/test/plugin_helper/test_compat_parameters.rb +0 -358
- data/test/plugin_helper/test_event_emitter.rb +0 -80
- data/test/plugin_helper/test_event_loop.rb +0 -52
- data/test/plugin_helper/test_extract.rb +0 -194
- data/test/plugin_helper/test_formatter.rb +0 -255
- data/test/plugin_helper/test_http_server_helper.rb +0 -372
- data/test/plugin_helper/test_inject.rb +0 -561
- data/test/plugin_helper/test_metrics.rb +0 -137
- data/test/plugin_helper/test_parser.rb +0 -264
- data/test/plugin_helper/test_record_accessor.rb +0 -238
- data/test/plugin_helper/test_retry_state.rb +0 -1006
- data/test/plugin_helper/test_server.rb +0 -1895
- data/test/plugin_helper/test_service_discovery.rb +0 -165
- data/test/plugin_helper/test_socket.rb +0 -146
- data/test/plugin_helper/test_storage.rb +0 -542
- data/test/plugin_helper/test_thread.rb +0 -164
- data/test/plugin_helper/test_timer.rb +0 -130
- data/test/scripts/exec_script.rb +0 -32
- data/test/scripts/fluent/plugin/formatter1/formatter_test1.rb +0 -7
- data/test/scripts/fluent/plugin/formatter2/formatter_test2.rb +0 -7
- data/test/scripts/fluent/plugin/formatter_known.rb +0 -8
- data/test/scripts/fluent/plugin/out_test.rb +0 -81
- data/test/scripts/fluent/plugin/out_test2.rb +0 -80
- data/test/scripts/fluent/plugin/parser_known.rb +0 -4
- data/test/test_capability.rb +0 -74
- data/test/test_clock.rb +0 -164
- data/test/test_config.rb +0 -369
- data/test/test_configdsl.rb +0 -148
- data/test/test_daemonizer.rb +0 -91
- data/test/test_engine.rb +0 -203
- data/test/test_event.rb +0 -531
- data/test/test_event_router.rb +0 -348
- data/test/test_event_time.rb +0 -199
- data/test/test_file_wrapper.rb +0 -53
- data/test/test_filter.rb +0 -121
- data/test/test_fluent_log_event_router.rb +0 -99
- data/test/test_formatter.rb +0 -369
- data/test/test_input.rb +0 -31
- data/test/test_log.rb +0 -1076
- data/test/test_match.rb +0 -148
- data/test/test_mixin.rb +0 -351
- data/test/test_msgpack_factory.rb +0 -50
- data/test/test_oj_options.rb +0 -55
- data/test/test_output.rb +0 -278
- data/test/test_plugin.rb +0 -251
- data/test/test_plugin_classes.rb +0 -370
- data/test/test_plugin_helper.rb +0 -81
- data/test/test_plugin_id.rb +0 -119
- data/test/test_process.rb +0 -14
- data/test/test_root_agent.rb +0 -951
- data/test/test_static_config_analysis.rb +0 -177
- data/test/test_supervisor.rb +0 -821
- data/test/test_test_drivers.rb +0 -136
- data/test/test_time_formatter.rb +0 -301
- data/test/test_time_parser.rb +0 -362
- data/test/test_tls.rb +0 -65
- data/test/test_unique_id.rb +0 -47
- data/test/test_variable_store.rb +0 -65
data/test/plugin/test_buffer.rb DELETED
@@ -1,1493 +0,0 @@
|
|
1
|
-
require_relative '../helper'
|
2
|
-
require 'fluent/plugin/buffer'
|
3
|
-
require 'fluent/plugin/buffer/memory_chunk'
|
4
|
-
require 'fluent/plugin/compressable'
|
5
|
-
require 'fluent/plugin/buffer/chunk'
|
6
|
-
require 'fluent/event'
|
7
|
-
require 'flexmock/test_unit'
|
8
|
-
|
9
|
-
require 'fluent/log'
|
10
|
-
require 'fluent/plugin_id'
|
11
|
-
|
12
|
-
require 'time'
|
13
|
-
|
14
|
-
module FluentPluginBufferTest
|
15
|
-
class DummyOutputPlugin < Fluent::Plugin::Base
|
16
|
-
include Fluent::PluginId
|
17
|
-
include Fluent::PluginLoggerMixin
|
18
|
-
end
|
19
|
-
class DummyMemoryChunkError < StandardError; end
|
20
|
-
class DummyMemoryChunk < Fluent::Plugin::Buffer::MemoryChunk
|
21
|
-
attr_reader :append_count, :rollbacked, :closed, :purged, :chunk
|
22
|
-
attr_accessor :failing
|
23
|
-
def initialize(metadata, compress: :text)
|
24
|
-
super
|
25
|
-
@append_count = 0
|
26
|
-
@rollbacked = false
|
27
|
-
@closed = false
|
28
|
-
@purged = false
|
29
|
-
@failing = false
|
30
|
-
end
|
31
|
-
def concat(data, size)
|
32
|
-
@append_count += 1
|
33
|
-
raise DummyMemoryChunkError if @failing
|
34
|
-
super
|
35
|
-
end
|
36
|
-
def rollback
|
37
|
-
super
|
38
|
-
@rollbacked = true
|
39
|
-
end
|
40
|
-
def close
|
41
|
-
super
|
42
|
-
@closed = true
|
43
|
-
end
|
44
|
-
def purge
|
45
|
-
super
|
46
|
-
@purged = true
|
47
|
-
end
|
48
|
-
end
|
49
|
-
class DummyPlugin < Fluent::Plugin::Buffer
|
50
|
-
def create_metadata(timekey=nil, tag=nil, variables=nil)
|
51
|
-
Fluent::Plugin::Buffer::Metadata.new(timekey, tag, variables)
|
52
|
-
end
|
53
|
-
def create_chunk(metadata, data)
|
54
|
-
c = FluentPluginBufferTest::DummyMemoryChunk.new(metadata)
|
55
|
-
c.append(data)
|
56
|
-
c.commit
|
57
|
-
c
|
58
|
-
end
|
59
|
-
def create_chunk_es(metadata, es)
|
60
|
-
c = FluentPluginBufferTest::DummyMemoryChunk.new(metadata)
|
61
|
-
c.concat(es.to_msgpack_stream, es.size)
|
62
|
-
c.commit
|
63
|
-
c
|
64
|
-
end
|
65
|
-
def resume
|
66
|
-
dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
|
67
|
-
dm1 = create_metadata(Time.parse('2016-04-11 16:10:00 +0000').to_i, nil, nil)
|
68
|
-
dm2 = create_metadata(Time.parse('2016-04-11 16:20:00 +0000').to_i, nil, nil)
|
69
|
-
dm3 = create_metadata(Time.parse('2016-04-11 16:30:00 +0000').to_i, nil, nil)
|
70
|
-
staged = {
|
71
|
-
dm2 => create_chunk(dm2, ["b" * 100]).staged!,
|
72
|
-
dm3 => create_chunk(dm3, ["c" * 100]).staged!,
|
73
|
-
}
|
74
|
-
queued = [
|
75
|
-
create_chunk(dm0, ["0" * 100]).enqueued!,
|
76
|
-
create_chunk(dm1, ["a" * 100]).enqueued!,
|
77
|
-
create_chunk(dm1, ["a" * 3]).enqueued!,
|
78
|
-
]
|
79
|
-
return staged, queued
|
80
|
-
end
|
81
|
-
def generate_chunk(metadata)
|
82
|
-
DummyMemoryChunk.new(metadata, compress: @compress)
|
83
|
-
end
|
84
|
-
end
|
85
|
-
end
|
86
|
-
|
87
|
-
class BufferTest < Test::Unit::TestCase
|
88
|
-
def create_buffer(hash)
|
89
|
-
buffer_conf = config_element('buffer', '', hash, [])
|
90
|
-
owner = FluentPluginBufferTest::DummyOutputPlugin.new
|
91
|
-
owner.configure(config_element('ROOT', '', {}, [ buffer_conf ]))
|
92
|
-
p = FluentPluginBufferTest::DummyPlugin.new
|
93
|
-
p.owner = owner
|
94
|
-
p.configure(buffer_conf)
|
95
|
-
p
|
96
|
-
end
|
97
|
-
|
98
|
-
def create_metadata(timekey=nil, tag=nil, variables=nil)
|
99
|
-
Fluent::Plugin::Buffer::Metadata.new(timekey, tag, variables)
|
100
|
-
end
|
101
|
-
|
102
|
-
def create_chunk(metadata, data)
|
103
|
-
c = FluentPluginBufferTest::DummyMemoryChunk.new(metadata)
|
104
|
-
c.append(data)
|
105
|
-
c.commit
|
106
|
-
c
|
107
|
-
end
|
108
|
-
|
109
|
-
def create_chunk_es(metadata, es)
|
110
|
-
c = FluentPluginBufferTest::DummyMemoryChunk.new(metadata)
|
111
|
-
c.concat(es.to_msgpack_stream, es.size)
|
112
|
-
c.commit
|
113
|
-
c
|
114
|
-
end
|
115
|
-
|
116
|
-
setup do
|
117
|
-
Fluent::Test.setup
|
118
|
-
end
|
119
|
-
|
120
|
-
sub_test_case 'using base buffer class' do
|
121
|
-
setup do
|
122
|
-
buffer_conf = config_element('buffer', '', {}, [])
|
123
|
-
owner = FluentPluginBufferTest::DummyOutputPlugin.new
|
124
|
-
owner.configure(config_element('ROOT', '', {}, [ buffer_conf ]))
|
125
|
-
p = Fluent::Plugin::Buffer.new
|
126
|
-
p.owner = owner
|
127
|
-
p.configure(buffer_conf)
|
128
|
-
@p = p
|
129
|
-
end
|
130
|
-
|
131
|
-
test 'default persistency is false' do
|
132
|
-
assert !@p.persistent?
|
133
|
-
end
|
134
|
-
|
135
|
-
test 'chunk bytes limit is 8MB, and total bytes limit is 512MB' do
|
136
|
-
assert_equal 8*1024*1024, @p.chunk_limit_size
|
137
|
-
assert_equal 512*1024*1024, @p.total_limit_size
|
138
|
-
end
|
139
|
-
|
140
|
-
test 'chunk records limit is ignored in default' do
|
141
|
-
assert_nil @p.chunk_limit_records
|
142
|
-
end
|
143
|
-
|
144
|
-
test '#storable? checks total size of staged and enqueued(includes dequeued chunks) against total_limit_size' do
|
145
|
-
assert_equal 512*1024*1024, @p.total_limit_size
|
146
|
-
assert_equal 0, @p.stage_size
|
147
|
-
assert_equal 0, @p.queue_size
|
148
|
-
assert @p.storable?
|
149
|
-
|
150
|
-
@p.stage_size = 256 * 1024 * 1024
|
151
|
-
@p.queue_size = 256 * 1024 * 1024 - 1
|
152
|
-
assert @p.storable?
|
153
|
-
|
154
|
-
@p.queue_size = 256 * 1024 * 1024
|
155
|
-
assert !@p.storable?
|
156
|
-
end
|
157
|
-
|
158
|
-
test '#resume must be implemented by subclass' do
|
159
|
-
assert_raise NotImplementedError do
|
160
|
-
@p.resume
|
161
|
-
end
|
162
|
-
end
|
163
|
-
|
164
|
-
test '#generate_chunk must be implemented by subclass' do
|
165
|
-
assert_raise NotImplementedError do
|
166
|
-
@p.generate_chunk(Object.new)
|
167
|
-
end
|
168
|
-
end
|
169
|
-
end
|
170
|
-
|
171
|
-
sub_test_case 'with default configuration and dummy implementation' do
|
172
|
-
setup do
|
173
|
-
@p = create_buffer({'queued_chunks_limit_size' => 100})
|
174
|
-
@dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
|
175
|
-
@dm1 = create_metadata(Time.parse('2016-04-11 16:10:00 +0000').to_i, nil, nil)
|
176
|
-
@dm2 = create_metadata(Time.parse('2016-04-11 16:20:00 +0000').to_i, nil, nil)
|
177
|
-
@dm3 = create_metadata(Time.parse('2016-04-11 16:30:00 +0000').to_i, nil, nil)
|
178
|
-
@p.start
|
179
|
-
end
|
180
|
-
|
181
|
-
test '#start resumes buffer states and update queued numbers per metadata' do
|
182
|
-
plugin = create_buffer({})
|
183
|
-
|
184
|
-
assert_equal({}, plugin.stage)
|
185
|
-
assert_equal([], plugin.queue)
|
186
|
-
assert_equal({}, plugin.dequeued)
|
187
|
-
assert_equal({}, plugin.queued_num)
|
188
|
-
|
189
|
-
assert_equal 0, plugin.stage_size
|
190
|
-
assert_equal 0, plugin.queue_size
|
191
|
-
assert_equal [], plugin.timekeys
|
192
|
-
|
193
|
-
# @p is started plugin
|
194
|
-
|
195
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
196
|
-
assert_equal "b" * 100, @p.stage[@dm2].read
|
197
|
-
assert_equal "c" * 100, @p.stage[@dm3].read
|
198
|
-
|
199
|
-
assert_equal 200, @p.stage_size
|
200
|
-
|
201
|
-
assert_equal 3, @p.queue.size
|
202
|
-
assert_equal "0" * 100, @p.queue[0].read
|
203
|
-
assert_equal "a" * 100, @p.queue[1].read
|
204
|
-
assert_equal "a" * 3, @p.queue[2].read
|
205
|
-
|
206
|
-
assert_equal 203, @p.queue_size
|
207
|
-
|
208
|
-
# staged, queued
|
209
|
-
assert_equal 1, @p.queued_num[@dm0]
|
210
|
-
assert_equal 2, @p.queued_num[@dm1]
|
211
|
-
end
|
212
|
-
|
213
|
-
test '#close closes all chunks in dequeued, enqueued and staged' do
|
214
|
-
dmx = create_metadata(Time.parse('2016-04-11 15:50:00 +0000').to_i, nil, nil)
|
215
|
-
cx = create_chunk(dmx, ["x" * 1024])
|
216
|
-
@p.dequeued[cx.unique_id] = cx
|
217
|
-
|
218
|
-
staged_chunks = @p.stage.values.dup
|
219
|
-
queued_chunks = @p.queue.dup
|
220
|
-
|
221
|
-
@p.close
|
222
|
-
|
223
|
-
assert cx.closed
|
224
|
-
assert{ staged_chunks.all?{|c| c.closed } }
|
225
|
-
assert{ queued_chunks.all?{|c| c.closed } }
|
226
|
-
end
|
227
|
-
|
228
|
-
test '#terminate initializes all internal states' do
|
229
|
-
dmx = create_metadata(Time.parse('2016-04-11 15:50:00 +0000').to_i, nil, nil)
|
230
|
-
cx = create_chunk(dmx, ["x" * 1024])
|
231
|
-
@p.dequeued[cx.unique_id] = cx
|
232
|
-
|
233
|
-
@p.close
|
234
|
-
|
235
|
-
@p.terminate
|
236
|
-
|
237
|
-
assert_nil @p.stage
|
238
|
-
assert_nil @p.queue
|
239
|
-
assert_nil @p.dequeued
|
240
|
-
assert_nil @p.queued_num
|
241
|
-
assert_nil @p.stage_length_metrics
|
242
|
-
assert_nil @p.stage_size_metrics
|
243
|
-
assert_nil @p.queue_length_metrics
|
244
|
-
assert_nil @p.queue_size_metrics
|
245
|
-
assert_nil @p.available_buffer_space_ratios_metrics
|
246
|
-
assert_nil @p.total_queued_size_metrics
|
247
|
-
assert_nil @p.newest_timekey_metrics
|
248
|
-
assert_nil @p.oldest_timekey_metrics
|
249
|
-
assert_equal [], @p.timekeys
|
250
|
-
end
|
251
|
-
|
252
|
-
test '#queued_records returns total number of size in all chunks in queue' do
|
253
|
-
assert_equal 3, @p.queue.size
|
254
|
-
|
255
|
-
r0 = @p.queue[0].size
|
256
|
-
assert_equal 1, r0
|
257
|
-
r1 = @p.queue[1].size
|
258
|
-
assert_equal 1, r1
|
259
|
-
r2 = @p.queue[2].size
|
260
|
-
assert_equal 1, r2
|
261
|
-
|
262
|
-
assert_equal (r0+r1+r2), @p.queued_records
|
263
|
-
end
|
264
|
-
|
265
|
-
test '#queued? returns queue has any chunks or not without arguments' do
|
266
|
-
assert @p.queued?
|
267
|
-
|
268
|
-
@p.queue.reject!{|_c| true }
|
269
|
-
assert !@p.queued?
|
270
|
-
end
|
271
|
-
|
272
|
-
test '#queued? returns queue has chunks for specified metadata with an argument' do
|
273
|
-
assert @p.queued?(@dm0)
|
274
|
-
assert @p.queued?(@dm1)
|
275
|
-
assert !@p.queued?(@dm2)
|
276
|
-
end
|
277
|
-
|
278
|
-
test '#enqueue_chunk enqueues a chunk on stage with specified metadata' do
|
279
|
-
assert_equal 2, @p.stage.size
|
280
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
281
|
-
assert_equal 3, @p.queue.size
|
282
|
-
assert_nil @p.queued_num[@dm2]
|
283
|
-
|
284
|
-
assert_equal 200, @p.stage_size
|
285
|
-
assert_equal 203, @p.queue_size
|
286
|
-
|
287
|
-
@p.enqueue_chunk(@dm2)
|
288
|
-
|
289
|
-
assert_equal [@dm3], @p.stage.keys
|
290
|
-
assert_equal @dm2, @p.queue.last.metadata
|
291
|
-
assert_equal 1, @p.queued_num[@dm2]
|
292
|
-
assert_equal 100, @p.stage_size
|
293
|
-
assert_equal 303, @p.queue_size
|
294
|
-
end
|
295
|
-
|
296
|
-
test '#enqueue_chunk ignores empty chunks' do
|
297
|
-
assert_equal 3, @p.queue.size
|
298
|
-
|
299
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
300
|
-
c = create_chunk(m, [''])
|
301
|
-
@p.stage[m] = c
|
302
|
-
assert @p.stage[m].empty?
|
303
|
-
assert !c.closed
|
304
|
-
|
305
|
-
@p.enqueue_chunk(m)
|
306
|
-
|
307
|
-
assert_nil @p.stage[m]
|
308
|
-
assert_equal 3, @p.queue.size
|
309
|
-
assert_nil @p.queued_num[m]
|
310
|
-
assert c.closed
|
311
|
-
end
|
312
|
-
|
313
|
-
test '#enqueue_chunk calls #enqueued! if chunk responds to it' do
|
314
|
-
assert_equal 3, @p.queue.size
|
315
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
316
|
-
c = create_chunk(m, ['c' * 256])
|
317
|
-
callback_called = false
|
318
|
-
(class << c; self; end).module_eval do
|
319
|
-
define_method(:enqueued!){ callback_called = true }
|
320
|
-
end
|
321
|
-
|
322
|
-
@p.stage[m] = c
|
323
|
-
@p.enqueue_chunk(m)
|
324
|
-
|
325
|
-
assert_equal c, @p.queue.last
|
326
|
-
assert callback_called
|
327
|
-
end
|
328
|
-
|
329
|
-
test '#enqueue_all enqueues chunks on stage which given block returns true with' do
|
330
|
-
m1 = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
331
|
-
c1 = create_chunk(m1, ['c' * 256])
|
332
|
-
@p.stage[m1] = c1
|
333
|
-
m2 = @p.metadata(timekey: Time.parse('2016-04-11 16:50:00 +0000').to_i)
|
334
|
-
c2 = create_chunk(m2, ['c' * 256])
|
335
|
-
@p.stage[m2] = c2
|
336
|
-
|
337
|
-
assert_equal [@dm2,@dm3,m1,m2], @p.stage.keys
|
338
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
339
|
-
|
340
|
-
@p.enqueue_all{ |m, c| m.timekey < Time.parse('2016-04-11 16:41:00 +0000').to_i }
|
341
|
-
|
342
|
-
assert_equal [m2], @p.stage.keys
|
343
|
-
assert_equal [@dm0,@dm1,@dm1,@dm2,@dm3,m1], @p.queue.map(&:metadata)
|
344
|
-
end
|
345
|
-
|
346
|
-
test '#enqueue_all enqueues all chunks on stage without block' do
|
347
|
-
m1 = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
348
|
-
c1 = create_chunk(m1, ['c' * 256])
|
349
|
-
@p.stage[m1] = c1
|
350
|
-
m2 = @p.metadata(timekey: Time.parse('2016-04-11 16:50:00 +0000').to_i)
|
351
|
-
c2 = create_chunk(m2, ['c' * 256])
|
352
|
-
@p.stage[m2] = c2
|
353
|
-
|
354
|
-
assert_equal [@dm2,@dm3,m1,m2], @p.stage.keys
|
355
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
356
|
-
|
357
|
-
@p.enqueue_all
|
358
|
-
|
359
|
-
assert_equal [], @p.stage.keys
|
360
|
-
assert_equal [@dm0,@dm1,@dm1,@dm2,@dm3,m1,m2], @p.queue.map(&:metadata)
|
361
|
-
end
|
362
|
-
|
363
|
-
test '#dequeue_chunk dequeues a chunk from queue if a chunk exists' do
|
364
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
365
|
-
assert_equal({}, @p.dequeued)
|
366
|
-
|
367
|
-
m1 = @p.dequeue_chunk
|
368
|
-
assert_equal @dm0, m1.metadata
|
369
|
-
assert_equal @dm0, @p.dequeued[m1.unique_id].metadata
|
370
|
-
|
371
|
-
m2 = @p.dequeue_chunk
|
372
|
-
assert_equal @dm1, m2.metadata
|
373
|
-
assert_equal @dm1, @p.dequeued[m2.unique_id].metadata
|
374
|
-
|
375
|
-
m3 = @p.dequeue_chunk
|
376
|
-
assert_equal @dm1, m3.metadata
|
377
|
-
assert_equal @dm1, @p.dequeued[m3.unique_id].metadata
|
378
|
-
|
379
|
-
m4 = @p.dequeue_chunk
|
380
|
-
assert_nil m4
|
381
|
-
end
|
382
|
-
|
383
|
-
test '#takeback_chunk resumes a chunk from dequeued to queued at the head of queue, and returns true' do
|
384
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
385
|
-
assert_equal({}, @p.dequeued)
|
386
|
-
|
387
|
-
m1 = @p.dequeue_chunk
|
388
|
-
assert_equal @dm0, m1.metadata
|
389
|
-
assert_equal @dm0, @p.dequeued[m1.unique_id].metadata
|
390
|
-
assert_equal [@dm1,@dm1], @p.queue.map(&:metadata)
|
391
|
-
assert_equal({m1.unique_id => m1}, @p.dequeued)
|
392
|
-
|
393
|
-
assert @p.takeback_chunk(m1.unique_id)
|
394
|
-
|
395
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
396
|
-
assert_equal({}, @p.dequeued)
|
397
|
-
end
|
398
|
-
|
399
|
-
test '#purge_chunk removes a chunk specified by argument id from dequeued chunks' do
|
400
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
401
|
-
assert_equal({}, @p.dequeued)
|
402
|
-
|
403
|
-
m0 = @p.dequeue_chunk
|
404
|
-
m1 = @p.dequeue_chunk
|
405
|
-
|
406
|
-
assert @p.takeback_chunk(m0.unique_id)
|
407
|
-
|
408
|
-
assert_equal [@dm0,@dm1], @p.queue.map(&:metadata)
|
409
|
-
assert_equal({m1.unique_id => m1}, @p.dequeued)
|
410
|
-
|
411
|
-
assert !m1.purged
|
412
|
-
|
413
|
-
@p.purge_chunk(m1.unique_id)
|
414
|
-
assert m1.purged
|
415
|
-
|
416
|
-
assert_equal [@dm0,@dm1], @p.queue.map(&:metadata)
|
417
|
-
assert_equal({}, @p.dequeued)
|
418
|
-
end
|
419
|
-
|
420
|
-
test '#purge_chunk removes an argument metadata if no chunks exist on stage or in queue' do
|
421
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
422
|
-
assert_equal({}, @p.dequeued)
|
423
|
-
|
424
|
-
m0 = @p.dequeue_chunk
|
425
|
-
|
426
|
-
assert_equal [@dm1,@dm1], @p.queue.map(&:metadata)
|
427
|
-
assert_equal({m0.unique_id => m0}, @p.dequeued)
|
428
|
-
|
429
|
-
assert !m0.purged
|
430
|
-
|
431
|
-
@p.purge_chunk(m0.unique_id)
|
432
|
-
assert m0.purged
|
433
|
-
|
434
|
-
assert_equal [@dm1,@dm1], @p.queue.map(&:metadata)
|
435
|
-
assert_equal({}, @p.dequeued)
|
436
|
-
end
|
437
|
-
|
438
|
-
test '#takeback_chunk returns false if specified chunk_id is already purged' do
|
439
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
440
|
-
assert_equal({}, @p.dequeued)
|
441
|
-
|
442
|
-
m0 = @p.dequeue_chunk
|
443
|
-
|
444
|
-
assert_equal [@dm1,@dm1], @p.queue.map(&:metadata)
|
445
|
-
assert_equal({m0.unique_id => m0}, @p.dequeued)
|
446
|
-
|
447
|
-
assert !m0.purged
|
448
|
-
|
449
|
-
@p.purge_chunk(m0.unique_id)
|
450
|
-
assert m0.purged
|
451
|
-
|
452
|
-
assert_equal [@dm1,@dm1], @p.queue.map(&:metadata)
|
453
|
-
assert_equal({}, @p.dequeued)
|
454
|
-
|
455
|
-
assert !@p.takeback_chunk(m0.unique_id)
|
456
|
-
|
457
|
-
assert_equal [@dm1,@dm1], @p.queue.map(&:metadata)
|
458
|
-
assert_equal({}, @p.dequeued)
|
459
|
-
end
|
460
|
-
|
461
|
-
test '#clear_queue! removes all chunks in queue, but leaves staged chunks' do
|
462
|
-
qchunks = @p.queue.dup
|
463
|
-
|
464
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
465
|
-
assert_equal 2, @p.stage.size
|
466
|
-
assert_equal({}, @p.dequeued)
|
467
|
-
|
468
|
-
@p.clear_queue!
|
469
|
-
|
470
|
-
assert_equal [], @p.queue
|
471
|
-
assert_equal 0, @p.queue_size
|
472
|
-
assert_equal 2, @p.stage.size
|
473
|
-
assert_equal({}, @p.dequeued)
|
474
|
-
|
475
|
-
assert{ qchunks.all?{ |c| c.purged } }
|
476
|
-
end
|
477
|
-
|
478
|
-
test '#write returns immediately if argument data is empty array' do
|
479
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
480
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
481
|
-
|
482
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
483
|
-
|
484
|
-
@p.write({m => []})
|
485
|
-
|
486
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
487
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
488
|
-
end
|
489
|
-
|
490
|
-
test '#write returns immediately if argument data is empty event stream' do
|
491
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
492
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
493
|
-
|
494
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
495
|
-
|
496
|
-
@p.write({m => Fluent::ArrayEventStream.new([])})
|
497
|
-
|
498
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
499
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
500
|
-
end
|
501
|
-
|
502
|
-
test '#write raises BufferOverflowError if buffer is not storable' do
|
503
|
-
@p.stage_size = 256 * 1024 * 1024
|
504
|
-
@p.queue_size = 256 * 1024 * 1024
|
505
|
-
|
506
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
507
|
-
|
508
|
-
assert_raise Fluent::Plugin::Buffer::BufferOverflowError do
|
509
|
-
@p.write({m => ["x" * 256]})
|
510
|
-
end
|
511
|
-
end
|
512
|
-
|
513
|
-
test '#write stores data into an existing chunk with metadata specified' do
|
514
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
515
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
516
|
-
|
517
|
-
dm3data = @p.stage[@dm3].read.dup
|
518
|
-
prev_stage_size = @p.stage_size
|
519
|
-
|
520
|
-
assert_equal 1, @p.stage[@dm3].append_count
|
521
|
-
|
522
|
-
@p.write({@dm3 => ["x" * 256, "y" * 256, "z" * 256]})
|
523
|
-
|
524
|
-
assert_equal 2, @p.stage[@dm3].append_count
|
525
|
-
assert_equal (dm3data + ("x" * 256) + ("y" * 256) + ("z" * 256)), @p.stage[@dm3].read
|
526
|
-
assert_equal (prev_stage_size + 768), @p.stage_size
|
527
|
-
|
528
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
529
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
530
|
-
end
|
531
|
-
|
532
|
-
test '#write creates new chunk and store data into it if there are no chunks for specified metadata' do
|
533
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
534
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
535
|
-
|
536
|
-
timekey = Time.parse('2016-04-11 16:40:00 +0000').to_i
|
537
|
-
assert !@p.timekeys.include?(timekey)
|
538
|
-
|
539
|
-
prev_stage_size = @p.stage_size
|
540
|
-
|
541
|
-
m = @p.metadata(timekey: timekey)
|
542
|
-
|
543
|
-
@p.write({m => ["x" * 256, "y" * 256, "z" * 256]})
|
544
|
-
|
545
|
-
assert_equal 1, @p.stage[m].append_count
|
546
|
-
assert_equal ("x" * 256 + "y" * 256 + "z" * 256), @p.stage[m].read
|
547
|
-
assert_equal (prev_stage_size + 768), @p.stage_size
|
548
|
-
|
549
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
550
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
551
|
-
|
552
|
-
@p.update_timekeys
|
553
|
-
|
554
|
-
assert @p.timekeys.include?(timekey)
|
555
|
-
end
|
556
|
-
|
557
|
-
test '#write tries to enqueue and store data into a new chunk if existing chunk is full' do
|
558
|
-
assert_equal 8 * 1024 * 1024, @p.chunk_limit_size
|
559
|
-
assert_equal 0.95, @p.chunk_full_threshold
|
560
|
-
|
561
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
562
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
563
|
-
|
564
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
565
|
-
|
566
|
-
row = "x" * 1024 * 1024
|
567
|
-
small_row = "x" * 1024 * 512
|
568
|
-
@p.write({m => [row] * 7 + [small_row]})
|
569
|
-
|
570
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
571
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
572
|
-
assert_equal 1, @p.stage[m].append_count
|
573
|
-
|
574
|
-
@p.write({m => [row]})
|
575
|
-
|
576
|
-
assert_equal [@dm0,@dm1,@dm1,m], @p.queue.map(&:metadata)
|
577
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
578
|
-
assert_equal 1, @p.stage[m].append_count
|
579
|
-
assert_equal 1024*1024, @p.stage[m].bytesize
|
580
|
-
assert_equal 3, @p.queue.last.append_count # 1 -> write (2) -> write_step_by_step (3)
|
581
|
-
assert @p.queue.last.rollbacked
|
582
|
-
end
|
583
|
-
|
584
|
-
test '#write rollbacks if commit raises errors' do
|
585
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
586
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
587
|
-
|
588
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
589
|
-
|
590
|
-
row = "x" * 1024
|
591
|
-
@p.write({m => [row] * 8})
|
592
|
-
|
593
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
594
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
595
|
-
|
596
|
-
target_chunk = @p.stage[m]
|
597
|
-
|
598
|
-
assert_equal 1, target_chunk.append_count
|
599
|
-
assert !target_chunk.rollbacked
|
600
|
-
|
601
|
-
(class << target_chunk; self; end).module_eval do
|
602
|
-
define_method(:commit){ raise "yay" }
|
603
|
-
end
|
604
|
-
|
605
|
-
assert_raise RuntimeError.new("yay") do
|
606
|
-
@p.write({m => [row]})
|
607
|
-
end
|
608
|
-
|
609
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
610
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
611
|
-
|
612
|
-
assert_equal 2, target_chunk.append_count
|
613
|
-
assert target_chunk.rollbacked
|
614
|
-
assert_equal row * 8, target_chunk.read
|
615
|
-
end
|
616
|
-
|
617
|
-
test '#write w/ format raises BufferOverflowError if buffer is not storable' do
|
618
|
-
@p.stage_size = 256 * 1024 * 1024
|
619
|
-
@p.queue_size = 256 * 1024 * 1024
|
620
|
-
|
621
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
622
|
-
|
623
|
-
es = Fluent::ArrayEventStream.new([ [event_time('2016-04-11 16:40:01 +0000'), {"message" => "xxxxxxxxxxxxxx"} ] ])
|
624
|
-
|
625
|
-
assert_raise Fluent::Plugin::Buffer::BufferOverflowError do
|
626
|
-
@p.write({m => es}, format: ->(e){e.to_msgpack_stream})
|
627
|
-
end
|
628
|
-
end
|
629
|
-
|
630
|
-
test '#write w/ format stores data into an existing chunk with metadata specified' do
|
631
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
632
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
633
|
-
|
634
|
-
dm3data = @p.stage[@dm3].read.dup
|
635
|
-
prev_stage_size = @p.stage_size
|
636
|
-
|
637
|
-
assert_equal 1, @p.stage[@dm3].append_count
|
638
|
-
|
639
|
-
es = Fluent::ArrayEventStream.new(
|
640
|
-
[
|
641
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 128}],
|
642
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "y" * 128}],
|
643
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "z" * 128}],
|
644
|
-
]
|
645
|
-
)
|
646
|
-
|
647
|
-
@p.write({@dm3 => es}, format: ->(e){e.to_msgpack_stream})
|
648
|
-
|
649
|
-
assert_equal 2, @p.stage[@dm3].append_count
|
650
|
-
assert_equal (dm3data + es.to_msgpack_stream), @p.stage[@dm3].read
|
651
|
-
assert_equal (prev_stage_size + es.to_msgpack_stream.bytesize), @p.stage_size
|
652
|
-
|
653
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
654
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
655
|
-
end
|
656
|
-
|
657
|
-
test '#write w/ format creates new chunk and store data into it if there are not chunks for specified metadata' do
|
658
|
-
assert_equal 8 * 1024 * 1024, @p.chunk_limit_size
|
659
|
-
|
660
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
661
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
662
|
-
|
663
|
-
timekey = Time.parse('2016-04-11 16:40:00 +0000').to_i
|
664
|
-
assert !@p.timekeys.include?(timekey)
|
665
|
-
|
666
|
-
m = @p.metadata(timekey: timekey)
|
667
|
-
|
668
|
-
es = Fluent::ArrayEventStream.new(
|
669
|
-
[
|
670
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
671
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
672
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
673
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
674
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
675
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
676
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
677
|
-
[event_time('2016-04-11 16:40:03 +0000'), {"message" => "z" * 1024 * 512}],
|
678
|
-
]
|
679
|
-
)
|
680
|
-
@p.write({m => es}, format: ->(e){e.to_msgpack_stream})
|
681
|
-
|
682
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
683
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
684
|
-
assert_equal 1, @p.stage[m].append_count
|
685
|
-
|
686
|
-
@p.update_timekeys
|
687
|
-
|
688
|
-
assert @p.timekeys.include?(timekey)
|
689
|
-
end
|
690
|
-
|
691
|
-
test '#write w/ format tries to enqueue and store data into a new chunk if existing chunk does not have enough space' do
|
692
|
-
assert_equal 8 * 1024 * 1024, @p.chunk_limit_size
|
693
|
-
|
694
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
695
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
696
|
-
|
697
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
698
|
-
|
699
|
-
es = Fluent::ArrayEventStream.new(
|
700
|
-
[
|
701
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
702
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
703
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
704
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
705
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
706
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
707
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
708
|
-
[event_time('2016-04-11 16:40:03 +0000'), {"message" => "z" * 1024 * 512}],
|
709
|
-
]
|
710
|
-
)
|
711
|
-
@p.write({m => es}, format: ->(e){e.to_msgpack_stream})
|
712
|
-
|
713
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
714
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
715
|
-
assert_equal 1, @p.stage[m].append_count
|
716
|
-
|
717
|
-
es2 = Fluent::OneEventStream.new(event_time('2016-04-11 16:40:03 +0000'), {"message" => "z" * 1024 * 1024})
|
718
|
-
@p.write({m => es2}, format: ->(e){e.to_msgpack_stream})
|
719
|
-
|
720
|
-
assert_equal [@dm0,@dm1,@dm1,m], @p.queue.map(&:metadata)
|
721
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
722
|
-
assert_equal 1, @p.stage[m].append_count
|
723
|
-
assert_equal es2.to_msgpack_stream.bytesize, @p.stage[m].bytesize
|
724
|
-
assert_equal 2, @p.queue.last.append_count # 1 -> write (2) -> rollback&enqueue
|
725
|
-
assert @p.queue.last.rollbacked
|
726
|
-
end
|
727
|
-
|
728
|
-
test '#write w/ format enqueues chunk if it is already full after adding data' do
|
729
|
-
assert_equal 8 * 1024 * 1024, @p.chunk_limit_size
|
730
|
-
|
731
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
732
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
733
|
-
|
734
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
735
|
-
es = Fluent::ArrayEventStream.new(
|
736
|
-
[
|
737
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}], # 1024 * 1024 bytes as msgpack stream
|
738
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}],
|
739
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}],
|
740
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}],
|
741
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}],
|
742
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}],
|
743
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}],
|
744
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * (1024 * 1024 - 25)}],
|
745
|
-
]
|
746
|
-
)
|
747
|
-
@p.write({m => es}, format: ->(e){e.to_msgpack_stream})
|
748
|
-
|
749
|
-
assert_equal [@dm0,@dm1,@dm1,m], @p.queue.map(&:metadata)
|
750
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
751
|
-
assert_equal 1, @p.queue.last.append_count
|
752
|
-
end
|
753
|
-
|
754
|
-
test '#write w/ format rollbacks if commit raises errors' do
|
755
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
756
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
757
|
-
|
758
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
759
|
-
|
760
|
-
es = Fluent::ArrayEventStream.new(
|
761
|
-
[
|
762
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
763
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
764
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
765
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
766
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
767
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
768
|
-
[event_time('2016-04-11 16:40:01 +0000'), {"message" => "x" * 1024 * 1024}],
|
769
|
-
[event_time('2016-04-11 16:40:03 +0000'), {"message" => "z" * 1024 * 512}],
|
770
|
-
]
|
771
|
-
)
|
772
|
-
@p.write({m => es}, format: ->(e){e.to_msgpack_stream})
|
773
|
-
|
774
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
775
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
776
|
-
|
777
|
-
target_chunk = @p.stage[m]
|
778
|
-
|
779
|
-
assert_equal 1, target_chunk.append_count
|
780
|
-
assert !target_chunk.rollbacked
|
781
|
-
|
782
|
-
(class << target_chunk; self; end).module_eval do
|
783
|
-
define_method(:commit){ raise "yay" }
|
784
|
-
end
|
785
|
-
|
786
|
-
es2 = Fluent::ArrayEventStream.new(
|
787
|
-
[
|
788
|
-
[event_time('2016-04-11 16:40:04 +0000'), {"message" => "z" * 1024 * 128}],
|
789
|
-
]
|
790
|
-
)
|
791
|
-
assert_raise RuntimeError.new("yay") do
|
792
|
-
@p.write({m => es2}, format: ->(e){e.to_msgpack_stream})
|
793
|
-
end
|
794
|
-
|
795
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
796
|
-
assert_equal [@dm2,@dm3,m], @p.stage.keys
|
797
|
-
|
798
|
-
assert_equal 2, target_chunk.append_count
|
799
|
-
assert target_chunk.rollbacked
|
800
|
-
assert_equal es.to_msgpack_stream, target_chunk.read
|
801
|
-
end
|
802
|
-
|
803
|
-
test '#write writes many metadata and data pairs at once' do
|
804
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
805
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
806
|
-
|
807
|
-
row = "x" * 1024
|
808
|
-
@p.write({ @dm0 => [row, row, row], @dm1 => [row, row] })
|
809
|
-
|
810
|
-
assert_equal [@dm2,@dm3,@dm0,@dm1], @p.stage.keys
|
811
|
-
end
|
812
|
-
|
813
|
-
test '#write does not commit on any chunks if any append operation on chunk fails' do
|
814
|
-
assert_equal [@dm0,@dm1,@dm1], @p.queue.map(&:metadata)
|
815
|
-
assert_equal [@dm2,@dm3], @p.stage.keys
|
816
|
-
|
817
|
-
row = "x" * 1024
|
818
|
-
@p.write({ @dm0 => [row, row, row], @dm1 => [row, row] })
|
819
|
-
|
820
|
-
assert_equal [@dm2,@dm3,@dm0,@dm1], @p.stage.keys
|
821
|
-
|
822
|
-
dm2_size = @p.stage[@dm2].size
|
823
|
-
assert !@p.stage[@dm2].rollbacked
|
824
|
-
dm3_size = @p.stage[@dm3].size
|
825
|
-
assert !@p.stage[@dm3].rollbacked
|
826
|
-
|
827
|
-
assert{ @p.stage[@dm0].size == 3 }
|
828
|
-
assert !@p.stage[@dm0].rollbacked
|
829
|
-
assert{ @p.stage[@dm1].size == 2 }
|
830
|
-
assert !@p.stage[@dm1].rollbacked
|
831
|
-
|
832
|
-
meta_list = [@dm0, @dm1, @dm2, @dm3].sort
|
833
|
-
@p.stage[meta_list.last].failing = true
|
834
|
-
|
835
|
-
assert_raise(FluentPluginBufferTest::DummyMemoryChunkError) do
|
836
|
-
@p.write({ @dm2 => [row], @dm3 => [row], @dm0 => [row, row, row], @dm1 => [row, row] })
|
837
|
-
end
|
838
|
-
|
839
|
-
assert{ @p.stage[@dm2].size == dm2_size }
|
840
|
-
assert @p.stage[@dm2].rollbacked
|
841
|
-
assert{ @p.stage[@dm3].size == dm3_size }
|
842
|
-
assert @p.stage[@dm3].rollbacked
|
843
|
-
|
844
|
-
assert{ @p.stage[@dm0].size == 3 }
|
845
|
-
assert @p.stage[@dm0].rollbacked
|
846
|
-
assert{ @p.stage[@dm1].size == 2 }
|
847
|
-
assert @p.stage[@dm1].rollbacked
|
848
|
-
end
|
849
|
-
|
850
|
-
test '#compress returns :text' do
|
851
|
-
assert_equal :text, @p.compress
|
852
|
-
end
|
853
|
-
|
854
|
-
# https://github.com/fluent/fluentd/issues/3089
|
855
|
-
test "closed chunk should not be committed" do
|
856
|
-
assert_equal 8 * 1024 * 1024, @p.chunk_limit_size
|
857
|
-
assert_equal 0.95, @p.chunk_full_threshold
|
858
|
-
|
859
|
-
purge_count = 0
|
860
|
-
|
861
|
-
stub.proxy(@p).generate_chunk(anything) do |chunk|
|
862
|
-
stub.proxy(chunk).purge do |result|
|
863
|
-
purge_count += 1
|
864
|
-
result
|
865
|
-
end
|
866
|
-
stub.proxy(chunk).commit do |result|
|
867
|
-
assert_false(chunk.closed?)
|
868
|
-
result
|
869
|
-
end
|
870
|
-
stub.proxy(chunk).rollback do |result|
|
871
|
-
assert_false(chunk.closed?)
|
872
|
-
result
|
873
|
-
end
|
874
|
-
chunk
|
875
|
-
end
|
876
|
-
|
877
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
878
|
-
small_row = "x" * 1024 * 400
|
879
|
-
big_row = "x" * 1024 * 1024 * 8 # just `chunk_size_limit`, it does't cause BufferOverFlowError.
|
880
|
-
|
881
|
-
# Write 42 events in 1 event stream, last one is for triggering `ShouldRetry`
|
882
|
-
@p.write({m => [small_row] * 40 + [big_row] + ["x"]})
|
883
|
-
|
884
|
-
# Above event strem will be splitted twice by `Buffer#write_step_by_step`
|
885
|
-
#
|
886
|
-
# 1. `write_once`: 42 [events] * 1 [stream]
|
887
|
-
# 2. `write_step_by_step`: 4 [events]* 10 [streams] + 2 [events] * 1 [stream]
|
888
|
-
# 3. `write_step_by_step` (by `ShouldRetry`): 1 [event] * 42 [streams]
|
889
|
-
#
|
890
|
-
# The problematic data is built in the 2nd stage.
|
891
|
-
# In the 2nd stage, 5 streams are packed in a chunk.
|
892
|
-
# ((1024 * 400) [bytes] * 4 [events] * 5 [streams] = 8192000 [bytes] < `chunk_limit_size` (8MB)).
|
893
|
-
# So 3 chunks are used to store all data.
|
894
|
-
# The 1st chunk is already staged by `write_once`.
|
895
|
-
# The 2nd & 3rd chunks are newly created as unstaged.
|
896
|
-
# The 3rd chunk is purged before `ShouldRetry`, it's no problem:
|
897
|
-
# https://github.com/fluent/fluentd/blob/7e9eba736ff40ad985341be800ddc46558be75f2/lib/fluent/plugin/buffer.rb#L850
|
898
|
-
# The 2nd chunk is purged in `rescue ShouldRetry`:
|
899
|
-
# https://github.com/fluent/fluentd/blob/7e9eba736ff40ad985341be800ddc46558be75f2/lib/fluent/plugin/buffer.rb#L862
|
900
|
-
# It causes the issue described in https://github.com/fluent/fluentd/issues/3089#issuecomment-1811839198
|
901
|
-
|
902
|
-
assert_equal 2, purge_count
|
903
|
-
end
|
904
|
-
|
905
|
-
# https://github.com/fluent/fluentd/issues/4446
|
906
|
-
test "#write_step_by_step keeps chunks kept in locked in entire #write process" do
|
907
|
-
assert_equal 8 * 1024 * 1024, @p.chunk_limit_size
|
908
|
-
assert_equal 0.95, @p.chunk_full_threshold
|
909
|
-
|
910
|
-
mon_enter_counts_by_chunk = {}
|
911
|
-
mon_exit_counts_by_chunk = {}
|
912
|
-
|
913
|
-
stub.proxy(@p).generate_chunk(anything) do |chunk|
|
914
|
-
stub(chunk).mon_enter do
|
915
|
-
enter_count = 1 + mon_enter_counts_by_chunk.fetch(chunk, 0)
|
916
|
-
exit_count = mon_exit_counts_by_chunk.fetch(chunk, 0)
|
917
|
-
mon_enter_counts_by_chunk[chunk] = enter_count
|
918
|
-
|
919
|
-
# Assert that chunk is passed to &block of write_step_by_step before exiting the lock.
|
920
|
-
# (i.e. The lock count must be 2 greater than the exit count).
|
921
|
-
# Since ShouldRetry occurs once, the staged chunk takes the lock 3 times when calling the block.
|
922
|
-
if chunk.staged?
|
923
|
-
lock_in_block = enter_count == 3
|
924
|
-
assert_equal(enter_count - 2, exit_count) if lock_in_block
|
925
|
-
else
|
926
|
-
lock_in_block = enter_count == 2
|
927
|
-
assert_equal(enter_count - 2, exit_count) if lock_in_block
|
928
|
-
end
|
929
|
-
end
|
930
|
-
stub(chunk).mon_exit do
|
931
|
-
exit_count = 1 + mon_exit_counts_by_chunk.fetch(chunk, 0)
|
932
|
-
mon_exit_counts_by_chunk[chunk] = exit_count
|
933
|
-
end
|
934
|
-
chunk
|
935
|
-
end
|
936
|
-
|
937
|
-
m = @p.metadata(timekey: Time.parse('2016-04-11 16:40:00 +0000').to_i)
|
938
|
-
small_row = "x" * 1024 * 400
|
939
|
-
big_row = "x" * 1024 * 1024 * 8 # just `chunk_size_limit`, it does't cause BufferOverFlowError.
|
940
|
-
|
941
|
-
# Write 42 events in 1 event stream, last one is for triggering `ShouldRetry`
|
942
|
-
@p.write({m => [small_row] * 40 + [big_row] + ["x"]})
|
943
|
-
|
944
|
-
# Above event strem will be splitted twice by `Buffer#write_step_by_step`
|
945
|
-
#
|
946
|
-
# 1. `write_once`: 42 [events] * 1 [stream]
|
947
|
-
# 2. `write_step_by_step`: 4 [events]* 10 [streams] + 2 [events] * 1 [stream]
|
948
|
-
# 3. `write_step_by_step` (by `ShouldRetry`): 1 [event] * 42 [streams]
|
949
|
-
#
|
950
|
-
# Example of staged chunk lock behavior:
|
951
|
-
#
|
952
|
-
# 1. mon_enter in write_step_by_step
|
953
|
-
# 2. ShouldRetry occurs
|
954
|
-
# 3. mon_exit in write_step_by_step
|
955
|
-
# 4. mon_enter again in write_step_by_step (retry)
|
956
|
-
# 5. passed to &block of write_step_by_step
|
957
|
-
# 6. mon_enter in the block (write)
|
958
|
-
# 7. mon_exit in write_step_by_step
|
959
|
-
# 8. mon_exit in write
|
960
|
-
|
961
|
-
assert_equal(mon_enter_counts_by_chunk.values, mon_exit_counts_by_chunk.values)
|
962
|
-
end
|
963
|
-
end
|
964
|
-
|
965
|
-
sub_test_case 'standard format with configuration for test with lower chunk limit size' do
|
966
|
-
setup do
|
967
|
-
@p = create_buffer({"chunk_limit_size" => 1_280_000})
|
968
|
-
@format = ->(e){e.to_msgpack_stream}
|
969
|
-
@dm0 = dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
|
970
|
-
# 1 record is 128bytes in msgpack stream
|
971
|
-
@es0 = es0 = Fluent::ArrayEventStream.new([ [event_time('2016-04-11 16:00:01 +0000'), {"message" => "x" * (128 - 22)}] ] * 5000)
|
972
|
-
(class << @p; self; end).module_eval do
|
973
|
-
define_method(:resume) {
|
974
|
-
staged = {
|
975
|
-
dm0 => create_chunk_es(dm0, es0).staged!,
|
976
|
-
}
|
977
|
-
queued = []
|
978
|
-
return staged, queued
|
979
|
-
}
|
980
|
-
end
|
981
|
-
@p.start
|
982
|
-
end
|
983
|
-
|
984
|
-
test '#write appends event stream into staged chunk' do
|
985
|
-
assert_equal [@dm0], @p.stage.keys
|
986
|
-
assert_equal [], @p.queue.map(&:metadata)
|
987
|
-
|
988
|
-
assert_equal 1_280_000, @p.chunk_limit_size
|
989
|
-
|
990
|
-
es = Fluent::ArrayEventStream.new([ [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * (128 - 22)}] ] * 1000)
|
991
|
-
@p.write({@dm0 => es}, format: @format)
|
992
|
-
|
993
|
-
assert_equal [@dm0], @p.stage.keys
|
994
|
-
assert_equal [], @p.queue.map(&:metadata)
|
995
|
-
|
996
|
-
assert_equal (@es0.to_msgpack_stream + es.to_msgpack_stream), @p.stage[@dm0].read
|
997
|
-
end
|
998
|
-
|
999
|
-
test '#write writes event stream into a new chunk with enqueueing existing chunk if event stream is larger than available space of existing chunk' do
|
1000
|
-
assert_equal [@dm0], @p.stage.keys
|
1001
|
-
assert_equal [], @p.queue.map(&:metadata)
|
1002
|
-
|
1003
|
-
assert_equal 1_280_000, @p.chunk_limit_size
|
1004
|
-
|
1005
|
-
es = Fluent::ArrayEventStream.new([ [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * (128 - 22)}] ] * 8000)
|
1006
|
-
@p.write({@dm0 => es}, format: @format)
|
1007
|
-
|
1008
|
-
assert_equal [@dm0], @p.stage.keys
|
1009
|
-
assert_equal [@dm0], @p.queue.map(&:metadata)
|
1010
|
-
|
1011
|
-
assert_equal (es.to_msgpack_stream), @p.stage[@dm0].read
|
1012
|
-
end
|
1013
|
-
|
1014
|
-
test '#write writes event stream into many chunks excluding staged chunk if event stream is larger than chunk limit size' do
|
1015
|
-
assert_equal [@dm0], @p.stage.keys
|
1016
|
-
assert_equal [], @p.queue.map(&:metadata)
|
1017
|
-
|
1018
|
-
assert_equal 1_280_000, @p.chunk_limit_size
|
1019
|
-
|
1020
|
-
es = Fluent::ArrayEventStream.new([ [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * (128 - 22)}] ] * 45000)
|
1021
|
-
@p.write({@dm0 => es}, format: @format)
|
1022
|
-
|
1023
|
-
# metadata whose seq is 4 is created, but overwrite with original metadata(seq=0) for next use of this chunk https://github.com/fluent/fluentd/blob/9d113029d4550ce576d8825bfa9612aa3e55bff0/lib/fluent/plugin/buffer.rb#L357
|
1024
|
-
assert_equal [@dm0], @p.stage.keys
|
1025
|
-
assert_equal 5400, @p.stage[@dm0].size
|
1026
|
-
assert_equal [@dm0, @dm0, @dm0, @dm0, @dm0], @p.queue.map(&:metadata)
|
1027
|
-
assert_equal [5000, 9900, 9900, 9900, 9900], @p.queue.map(&:size) # splits: 45000 / 100 => 450 * ...
|
1028
|
-
# 9900 * 4 + 5400 == 45000
|
1029
|
-
end
|
1030
|
-
|
1031
|
-
test '#dequeue_chunk succeeds when chunk is splited' do
|
1032
|
-
assert_equal [@dm0], @p.stage.keys
|
1033
|
-
assert_equal [], @p.queue.map(&:metadata)
|
1034
|
-
|
1035
|
-
assert_equal 1_280_000, @p.chunk_limit_size
|
1036
|
-
|
1037
|
-
es = Fluent::ArrayEventStream.new([ [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * (128 - 22)}] ] * 45000)
|
1038
|
-
@p.write({@dm0 => es}, format: @format)
|
1039
|
-
@p.enqueue_all(true)
|
1040
|
-
|
1041
|
-
dequeued_chunks = 6.times.map { |e| @p.dequeue_chunk } # splits: 45000 / 100 => 450 * ...
|
1042
|
-
assert_equal [5000, 9900, 9900, 9900, 9900, 5400], dequeued_chunks.map(&:size)
|
1043
|
-
assert_equal [@dm0, @dm0, @dm0, @dm0, @dm0, @dm0], dequeued_chunks.map(&:metadata)
|
1044
|
-
end
|
1045
|
-
|
1046
|
-
test '#write raises BufferChunkOverflowError if a record is biggar than chunk limit size' do
|
1047
|
-
assert_equal [@dm0], @p.stage.keys
|
1048
|
-
assert_equal [], @p.queue.map(&:metadata)
|
1049
|
-
|
1050
|
-
assert_equal 1_280_000, @p.chunk_limit_size
|
1051
|
-
|
1052
|
-
es = Fluent::ArrayEventStream.new([ [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * 1_280_000}] ])
|
1053
|
-
assert_raise Fluent::Plugin::Buffer::BufferChunkOverflowError do
|
1054
|
-
@p.write({@dm0 => es}, format: @format)
|
1055
|
-
end
|
1056
|
-
end
|
1057
|
-
|
1058
|
-
    data(
      first_chunk: Fluent::ArrayEventStream.new([[event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * 1_280_000}],
                                                 [event_time('2016-04-11 16:00:02 +0000'), {"message" => "a"}],
                                                 [event_time('2016-04-11 16:00:02 +0000'), {"message" => "b"}]]),
      intermediate_chunk: Fluent::ArrayEventStream.new([[event_time('2016-04-11 16:00:02 +0000'), {"message" => "a"}],
                                                        [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * 1_280_000}],
                                                        [event_time('2016-04-11 16:00:02 +0000'), {"message" => "b"}]]),
      last_chunk: Fluent::ArrayEventStream.new([[event_time('2016-04-11 16:00:02 +0000'), {"message" => "a"}],
                                                [event_time('2016-04-11 16:00:02 +0000'), {"message" => "b"}],
                                                [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * 1_280_000}]]),
      multiple_chunks: Fluent::ArrayEventStream.new([[event_time('2016-04-11 16:00:02 +0000'), {"message" => "a"}],
                                                     [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * 1_280_000}],
                                                     [event_time('2016-04-11 16:00:02 +0000'), {"message" => "b"}],
                                                     [event_time('2016-04-11 16:00:02 +0000'), {"message" => "x" * 1_280_000}]])
    )
    test '#write exceeds chunk_limit_size, raises BufferChunkOverflowError, but does not lose whole messages' do |(es)|
      assert_equal [@dm0], @p.stage.keys
      assert_equal [], @p.queue.map(&:metadata)

      assert_equal 1_280_000, @p.chunk_limit_size

      nth = []
      es.entries.each_with_index do |entry, index|
        if entry.last["message"].size == @p.chunk_limit_size
          nth << index
        end
      end
      messages = []
      nth.each do |n|
        messages << "a 1280025 bytes record (nth: #{n}) is larger than buffer chunk limit size (1280000)"
      end

      assert_raise Fluent::Plugin::Buffer::BufferChunkOverflowError.new(messages.join(", ")) do
        @p.write({@dm0 => es}, format: @format)
      end
      # messages a and b are concatenated and staged
      staged_messages = Fluent::MessagePackFactory.msgpack_unpacker.feed_each(@p.stage[@dm0].chunk).collect do |record|
        record.last
      end
      assert_equal([2, [{"message" => "a"}, {"message" => "b"}]],
                   [@p.stage[@dm0].size, staged_messages])
      # only the es0 message is queued
      assert_equal [@dm0], @p.queue.map(&:metadata)
      assert_equal [5000], @p.queue.map(&:size)
    end

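    # Rough origin of the "1280025 bytes" figure in the expected error message above
    # (an illustrative sketch, not part of the original test file; the exact overhead
    # depends on how fluentd serializes the event time): the reported size is the
    # msgpack-serialized [time, record] entry, i.e. the 1_280_000-byte message plus a
    # few dozen bytes of framing.
    #
    #   require 'msgpack'
    #   [1460390402, { "message" => "x" * 1_280_000 }].to_msgpack.bytesize
    #   # => a little over 1_280_000 (the test hard-codes 1_280_025)
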
test "confirm that every message which is smaller than chunk threshold does not raise BufferChunkOverflowError" do
|
1105
|
-
assert_equal [@dm0], @p.stage.keys
|
1106
|
-
assert_equal [], @p.queue.map(&:metadata)
|
1107
|
-
timestamp = event_time('2016-04-11 16:00:02 +0000')
|
1108
|
-
es = Fluent::ArrayEventStream.new([[timestamp, {"message" => "a" * 1_000_000}],
|
1109
|
-
[timestamp, {"message" => "b" * 1_000_000}],
|
1110
|
-
[timestamp, {"message" => "c" * 1_000_000}]])
|
1111
|
-
|
1112
|
-
# https://github.com/fluent/fluentd/issues/1849
|
1113
|
-
# Even though 1_000_000 < 1_280_000 (chunk_limit_size), it raised BufferChunkOverflowError before.
|
1114
|
-
# It should not be raised and message a,b,c should be stored into 3 chunks.
|
1115
|
-
assert_nothing_raised do
|
1116
|
-
@p.write({@dm0 => es}, format: @format)
|
1117
|
-
end
|
1118
|
-
messages = []
|
1119
|
-
# pick up first letter to check whether chunk is queued in expected order
|
1120
|
-
3.times do |index|
|
1121
|
-
chunk = @p.queue[index]
|
1122
|
-
es = Fluent::MessagePackEventStream.new(chunk.chunk)
|
1123
|
-
es.ensure_unpacked!
|
1124
|
-
records = es.instance_eval{ @unpacked_records }
|
1125
|
-
records.each do |record|
|
1126
|
-
messages << record["message"][0]
|
1127
|
-
end
|
1128
|
-
end
|
1129
|
-
es = Fluent::MessagePackEventStream.new(@p.stage[@dm0].chunk)
|
1130
|
-
es.ensure_unpacked!
|
1131
|
-
staged_message = es.instance_eval{ @unpacked_records }.first["message"]
|
1132
|
-
# message a and b are queued, message c is staged
|
1133
|
-
assert_equal([
|
1134
|
-
[@dm0],
|
1135
|
-
"c" * 1_000_000,
|
1136
|
-
[@dm0, @dm0, @dm0],
|
1137
|
-
[5000, 1, 1],
|
1138
|
-
[["x"] * 5000, "a", "b"].flatten
|
1139
|
-
],
|
1140
|
-
[
|
1141
|
-
@p.stage.keys,
|
1142
|
-
staged_message,
|
1143
|
-
@p.queue.map(&:metadata),
|
1144
|
-
@p.queue.map(&:size),
|
1145
|
-
messages
|
1146
|
-
])
|
1147
|
-
end
|
1148
|
-
end
|
1149
|
-
|
1150
|
-
  sub_test_case 'custom format with configuration for test with lower chunk limit size' do
    setup do
      @p = create_buffer({"chunk_limit_size" => 1_280_000})
      @dm0 = dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
      @row = "x" * 128
      @data0 = data0 = [@row] * 5000
      (class << @p; self; end).module_eval do
        define_method(:resume) {
          staged = {
            dm0 => create_chunk(dm0, data0).staged!,
          }
          queued = []
          return staged, queued
        }
      end
      @p.start
    end

    test '#write appends event stream into staged chunk' do
      assert_equal [@dm0], @p.stage.keys
      assert_equal [], @p.queue.map(&:metadata)

      assert_equal 1_280_000, @p.chunk_limit_size

      data = [@row] * 1000
      @p.write({@dm0 => data})

      assert_equal [@dm0], @p.stage.keys
      assert_equal [], @p.queue.map(&:metadata)

      assert_equal (@row * 6000), @p.stage[@dm0].read
    end

    test '#write writes event stream into a new chunk with enqueueing existing chunk if event stream is larger than available space of existing chunk' do
      assert_equal [@dm0], @p.stage.keys
      assert_equal [], @p.queue.map(&:metadata)

      staged_chunk_object_id = @p.stage[@dm0].object_id

      assert_equal 1_280_000, @p.chunk_limit_size

      data = [@row] * 8000
      @p.write({@dm0 => data})

      assert_equal [@dm0], @p.queue.map(&:metadata)
      assert_equal [staged_chunk_object_id], @p.queue.map(&:object_id)
      assert_equal [@dm0], @p.stage.keys

      assert_equal [9800], @p.queue.map(&:size)
      assert_equal 3200, @p.stage[@dm0].size
      # 9800 + 3200 == 5000 + 8000
    end

    test '#write writes event stream into many chunks including staging chunk if event stream is larger than chunk limit size' do
      assert_equal [@dm0], @p.stage.keys
      assert_equal [], @p.queue.map(&:metadata)

      staged_chunk_object_id = @p.stage[@dm0].object_id

      assert_equal 1_280_000, @p.chunk_limit_size

      assert_equal 5000, @p.stage[@dm0].size

      data = [@row] * 45000
      @p.write({@dm0 => data})

      assert_equal staged_chunk_object_id, @p.queue.first.object_id

      assert_equal [@dm0], @p.stage.keys
      assert_equal 900, @p.stage[@dm0].size
      assert_equal [@dm0, @dm0, @dm0, @dm0, @dm0], @p.queue.map(&:metadata)
      assert_equal [9500, 9900, 9900, 9900, 9900], @p.queue.map(&:size) # splits: 45000 / 100 => 450 * ...
      ##### 900 + 9500 + 9900 * 4 == 5000 + 45000
    end

    test '#write raises BufferChunkOverflowError if a record is bigger than chunk limit size' do
      assert_equal [@dm0], @p.stage.keys
      assert_equal [], @p.queue.map(&:metadata)

      assert_equal 1_280_000, @p.chunk_limit_size

      es = ["x" * 1_280_000 + "x" * 300]
      assert_raise Fluent::Plugin::Buffer::BufferChunkOverflowError do
        @p.write({@dm0 => es})
      end
    end

    test 'confirm that every array message which is smaller than chunk threshold does not raise BufferChunkOverflowError' do
      assert_equal [@dm0], @p.stage.keys
      assert_equal [], @p.queue.map(&:metadata)

      assert_equal 1_280_000, @p.chunk_limit_size

      es = ["a" * 1_000_000, "b" * 1_000_000, "c" * 1_000_000]
      assert_nothing_raised do
        @p.write({@dm0 => es})
      end
      queue_messages = @p.queue.collect do |chunk|
        # collect first character of each message
        chunk.chunk[0]
      end
      assert_equal([
                     [@dm0],
                     1,
                     "c",
                     [@dm0, @dm0, @dm0],
                     [5000, 1, 1],
                     ["x", "a", "b"]
                   ],
                   [
                     @p.stage.keys,
                     @p.stage[@dm0].size,
                     @p.stage[@dm0].chunk[0],
                     @p.queue.map(&:metadata),
                     @p.queue.map(&:size),
                     queue_messages
                   ])
    end
  end

  sub_test_case 'with configuration for test with lower limits' do
    setup do
      @p = create_buffer({"chunk_limit_size" => 1024, "total_limit_size" => 10240})
      @dm0 = dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
      @dm1 = dm1 = create_metadata(Time.parse('2016-04-11 16:10:00 +0000').to_i, nil, nil)
      @dm2 = dm2 = create_metadata(Time.parse('2016-04-11 16:20:00 +0000').to_i, nil, nil)
      @dm3 = dm3 = create_metadata(Time.parse('2016-04-11 16:30:00 +0000').to_i, nil, nil)
      (class << @p; self; end).module_eval do
        define_method(:resume) {
          staged = {
            dm2 => create_chunk(dm2, ["b" * 128] * 7).staged!,
            dm3 => create_chunk(dm3, ["c" * 128] * 5).staged!,
          }
          queued = [
            create_chunk(dm0, ["0" * 128] * 8).enqueued!,
            create_chunk(dm0, ["0" * 128] * 8).enqueued!,
            create_chunk(dm0, ["0" * 128] * 8).enqueued!,
            create_chunk(dm0, ["0" * 128] * 8).enqueued!,
            create_chunk(dm0, ["0" * 128] * 8).enqueued!,
            create_chunk(dm1, ["a" * 128] * 8).enqueued!,
            create_chunk(dm1, ["a" * 128] * 8).enqueued!,
            create_chunk(dm1, ["a" * 128] * 8).enqueued!, # 8th queued chunk
            create_chunk(dm1, ["a" * 128] * 3).enqueued!,
          ]
          return staged, queued
        }
      end
      @p.start
    end

    test '#storable? returns false when too much data exists' do
      assert_equal [@dm0,@dm0,@dm0,@dm0,@dm0,@dm1,@dm1,@dm1,@dm1], @p.queue.map(&:metadata)
      assert_equal [@dm2,@dm3], @p.stage.keys

      assert_equal 128*8*8+128*3, @p.queue_size
      assert_equal 128*7+128*5, @p.stage_size

      assert @p.storable?

      dm3 = @p.metadata(timekey: @dm3.timekey)
      @p.write({dm3 => ["c" * 128]})

      assert_equal 10240, (@p.stage_size + @p.queue_size)
      assert !@p.storable?
    end

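    # Book-keeping behind the #storable? expectations above (an illustrative sketch,
    # not part of the original test file, assuming storable? simply checks that the
    # current total stays below total_limit_size):
    queue_size = 128 * 8 * 8 + 128 * 3 # => 8_576 bytes across the nine queued chunks
    stage_size = 128 * 7 + 128 * 5     # => 1_536 bytes across the two staged chunks
    queue_size + stage_size            # => 10_112, still below the 10_240 limit
    queue_size + stage_size + 128      # => 10_240, equal to total_limit_size, so no longer storable
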
    test '#chunk_size_over? returns true if chunk size is bigger than limit' do
      m = create_metadata(Time.parse('2016-04-11 16:40:00 +0000').to_i)

      c1 = create_chunk(m, ["a" * 128] * 8)
      assert !@p.chunk_size_over?(c1)

      c2 = create_chunk(m, ["a" * 128] * 9)
      assert @p.chunk_size_over?(c2)

      c3 = create_chunk(m, ["a" * 128] * 8 + ["a"])
      assert @p.chunk_size_over?(c3)
    end

    test '#chunk_size_full? returns true if chunk size is big enough against the limit' do
      m = create_metadata(Time.parse('2016-04-11 16:40:00 +0000').to_i)

      c1 = create_chunk(m, ["a" * 128] * 7)
      assert !@p.chunk_size_full?(c1)

      c2 = create_chunk(m, ["a" * 128] * 8)
      assert @p.chunk_size_full?(c2)

      assert_equal 0.95, @p.chunk_full_threshold
      c3 = create_chunk(m, ["a" * 128] * 6 + ["a" * 64])
      assert !@p.chunk_size_full?(c3)
    end
  end

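  # The arithmetic behind the two tests above (an illustrative sketch, not part of the
  # original test file, assuming chunk_size_over? checks bytesize > chunk_limit_size and
  # chunk_size_full? checks bytesize >= chunk_limit_size * chunk_full_threshold):
  chunk_limit_size = 1024
  chunk_full_threshold = 0.95
  [128 * 8, 128 * 9].map { |bytes| bytes > chunk_limit_size }
  # => [false, true]  (1024 bytes is exactly at the limit, 1152 bytes is over it)
  [128 * 7, 128 * 8, 128 * 6 + 64].map { |bytes| bytes >= chunk_limit_size * chunk_full_threshold }
  # => [false, true, false]  (the "full" threshold works out to 972.8 bytes)
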
  sub_test_case 'with configuration includes chunk_limit_records' do
    setup do
      @p = create_buffer({"chunk_limit_size" => 1024, "total_limit_size" => 10240, "chunk_limit_records" => 6})
      @dm0 = dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
      @dm1 = dm1 = create_metadata(Time.parse('2016-04-11 16:10:00 +0000').to_i, nil, nil)
      @dm2 = dm2 = create_metadata(Time.parse('2016-04-11 16:20:00 +0000').to_i, nil, nil)
      @dm3 = dm3 = create_metadata(Time.parse('2016-04-11 16:30:00 +0000').to_i, nil, nil)
      (class << @p; self; end).module_eval do
        define_method(:resume) {
          staged = {
            dm2 => create_chunk(dm2, ["b" * 128] * 1).staged!,
            dm3 => create_chunk(dm3, ["c" * 128] * 2).staged!,
          }
          queued = [
            create_chunk(dm0, ["0" * 128] * 6).enqueued!,
            create_chunk(dm1, ["a" * 128] * 6).enqueued!,
            create_chunk(dm1, ["a" * 128] * 6).enqueued!,
            create_chunk(dm1, ["a" * 128] * 3).enqueued!,
          ]
          return staged, queued
        }
      end
      @p.start
    end

    test '#chunk_size_over? returns true if too many records exist in a chunk even if its byte size is less than the limit' do
      assert_equal 6, @p.chunk_limit_records

      m = create_metadata(Time.parse('2016-04-11 16:40:00 +0000').to_i)

      c1 = create_chunk(m, ["a" * 128] * 6)
      assert_equal 6, c1.size
      assert !@p.chunk_size_over?(c1)

      c2 = create_chunk(m, ["a" * 128] * 7)
      assert @p.chunk_size_over?(c2)

      c3 = create_chunk(m, ["a" * 128] * 6 + ["a"])
      assert @p.chunk_size_over?(c3)
    end

    test '#chunk_size_full? returns true if enough records exist in a chunk even if its byte size is less than the limit' do
      assert_equal 6, @p.chunk_limit_records

      m = create_metadata(Time.parse('2016-04-11 16:40:00 +0000').to_i)

      c1 = create_chunk(m, ["a" * 128] * 5)
      assert_equal 5, c1.size
      assert !@p.chunk_size_full?(c1)

      c2 = create_chunk(m, ["a" * 128] * 6)
      assert @p.chunk_size_full?(c2)

      c3 = create_chunk(m, ["a" * 128] * 5 + ["a"])
      assert @p.chunk_size_full?(c3)
    end
  end

  sub_test_case 'with configuration includes queue_limit_length' do
    setup do
      @p = create_buffer({"chunk_limit_size" => 1024, "total_limit_size" => 10240, "queue_limit_length" => 5})
      @dm0 = dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
      @dm1 = dm1 = create_metadata(Time.parse('2016-04-11 16:10:00 +0000').to_i, nil, nil)
      @dm2 = dm2 = create_metadata(Time.parse('2016-04-11 16:20:00 +0000').to_i, nil, nil)
      @dm3 = dm3 = create_metadata(Time.parse('2016-04-11 16:30:00 +0000').to_i, nil, nil)
      (class << @p; self; end).module_eval do
        define_method(:resume) {
          staged = {
            dm2 => create_chunk(dm2, ["b" * 128] * 1).staged!,
            dm3 => create_chunk(dm3, ["c" * 128] * 2).staged!,
          }
          queued = [
            create_chunk(dm0, ["0" * 128] * 6).enqueued!,
            create_chunk(dm1, ["a" * 128] * 6).enqueued!,
            create_chunk(dm1, ["a" * 128] * 6).enqueued!,
            create_chunk(dm1, ["a" * 128] * 3).enqueued!,
          ]
          return staged, queued
        }
      end
      @p.start
    end

    test '#configure will overwrite standard configuration if queue_limit_length is specified' do
      assert_equal 1024, @p.chunk_limit_size
      assert_equal 5, @p.queue_limit_length
      assert_equal (1024*5), @p.total_limit_size
    end
  end

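  # Compatibility mapping suggested by the assertions above (an illustrative note, not
  # part of the original test file): when the deprecated queue_limit_length is given,
  # total_limit_size is recomputed as chunk_limit_size * queue_limit_length, replacing
  # the explicitly configured 10240.
  1024 * 5 # => 5120, the total_limit_size the test expects
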
  sub_test_case 'when compress is gzip' do
    setup do
      @p = create_buffer({'compress' => 'gzip'})
      @dm0 = create_metadata(Time.parse('2016-04-11 16:00:00 +0000').to_i, nil, nil)
    end

    test '#compress returns :gzip' do
      assert_equal :gzip, @p.compress
    end

    test 'create decompressable chunk' do
      chunk = @p.generate_chunk(create_metadata)
      assert chunk.singleton_class.ancestors.include?(Fluent::Plugin::Buffer::Chunk::Decompressable)
    end

    test '#write raises BufferChunkOverflowError when compressed data exceeds chunk_limit_size' do
      @p = create_buffer({'compress' => 'gzip', 'chunk_limit_size' => 70})
      timestamp = event_time('2016-04-11 16:00:02 +0000')
      es = Fluent::ArrayEventStream.new([[timestamp, {"message" => "012345"}], # overflow
                                         [timestamp, {"message" => "aaa"}],
                                         [timestamp, {"message" => "bbb"}]])
      assert_equal [], @p.queue.map(&:metadata)
      assert_equal 70, @p.chunk_limit_size

      # calculate the actual boundary value; it varies by machine
      c = @p.generate_chunk(create_metadata)
      c.append(Fluent::ArrayEventStream.new([[timestamp, {"message" => "012345"}]]), compress: :gzip)
      overflow_bytes = c.bytesize

      messages = "concatenated/appended a #{overflow_bytes} bytes record (nth: 0) is larger than buffer chunk limit size (70)"
      assert_raise Fluent::Plugin::Buffer::BufferChunkOverflowError.new(messages) do
        # test format == nil && compress == :gzip
        @p.write({@dm0 => es})
      end
      # messages a and b each occupy a full chunk, so both are queued (no staged chunk)
      assert_equal([2, [@dm0, @dm0], [1, 1], nil],
                   [@p.queue.size, @p.queue.map(&:metadata), @p.queue.map(&:size), @p.stage[@dm0]])
    end
  end

  sub_test_case '#statistics' do
    setup do
      @p = create_buffer({ "total_limit_size" => 1024 })
      dm = create_metadata(Time.parse('2020-03-13 16:00:00 +0000').to_i, nil, nil)

      (class << @p; self; end).module_eval do
        define_method(:resume) {
          queued = [create_chunk(dm, ["a" * (1024 - 102)]).enqueued!]
          return {}, queued
        }
      end

      @p.start
    end

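    # Why 10.0 is expected below (an illustrative calculation, not part of the original
    # test file): the single queued chunk holds "a" * (1024 - 102) = 922 bytes against
    # total_limit_size = 1024, so the free ratio is
    #   ((1024 - 922) / 1024.0 * 100).round(1) # => 10.0
    # assuming the statistics output rounds to one decimal place.
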
    test 'returns available_buffer_space_ratios' do
      assert_equal 10.0, @p.statistics['buffer']['available_buffer_space_ratios']
    end
  end
end