fluentd 0.12.40 → 1.6.2
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of fluentd might be problematic. Click here for more details.
- checksums.yaml +5 -5
- data/.github/ISSUE_TEMPLATE/bug_report.md +39 -0
- data/.github/ISSUE_TEMPLATE/feature_request.md +23 -0
- data/.github/ISSUE_TEMPLATE.md +17 -0
- data/.github/PULL_REQUEST_TEMPLATE.md +13 -0
- data/.gitignore +5 -0
- data/.gitlab/cicd-template.yaml +10 -0
- data/.gitlab-ci.yml +147 -0
- data/.travis.yml +56 -20
- data/ADOPTERS.md +5 -0
- data/CHANGELOG.md +1369 -0
- data/CONTRIBUTING.md +16 -5
- data/GOVERNANCE.md +55 -0
- data/Gemfile +5 -0
- data/GithubWorkflow.md +78 -0
- data/LICENSE +202 -0
- data/MAINTAINERS.md +7 -0
- data/README.md +23 -11
- data/Rakefile +48 -2
- data/Vagrantfile +17 -0
- data/appveyor.yml +37 -0
- data/bin/fluent-binlog-reader +7 -0
- data/bin/fluent-ca-generate +6 -0
- data/bin/fluent-plugin-config-format +5 -0
- data/bin/fluent-plugin-generate +5 -0
- data/bin/fluentd +3 -0
- data/code-of-conduct.md +3 -0
- data/example/copy_roundrobin.conf +39 -0
- data/example/counter.conf +18 -0
- data/example/in_dummy_blocks.conf +17 -0
- data/example/in_dummy_with_compression.conf +23 -0
- data/example/in_forward.conf +7 -0
- data/example/in_forward_client.conf +37 -0
- data/example/in_forward_shared_key.conf +15 -0
- data/example/in_forward_tls.conf +14 -0
- data/example/in_forward_users.conf +24 -0
- data/example/in_forward_workers.conf +21 -0
- data/example/in_http.conf +3 -1
- data/example/in_out_forward.conf +17 -0
- data/example/logevents.conf +25 -0
- data/example/multi_filters.conf +61 -0
- data/example/out_exec_filter.conf +42 -0
- data/example/out_forward.conf +13 -13
- data/example/out_forward_buf_file.conf +23 -0
- data/example/out_forward_client.conf +109 -0
- data/example/out_forward_heartbeat_none.conf +16 -0
- data/example/out_forward_shared_key.conf +36 -0
- data/example/out_forward_tls.conf +18 -0
- data/example/out_forward_users.conf +65 -0
- data/example/out_null.conf +36 -0
- data/example/secondary_file.conf +42 -0
- data/example/suppress_config_dump.conf +7 -0
- data/example/worker_section.conf +36 -0
- data/fluent.conf +29 -0
- data/fluentd.gemspec +21 -11
- data/lib/fluent/agent.rb +67 -90
- data/lib/fluent/clock.rb +62 -0
- data/lib/fluent/command/binlog_reader.rb +244 -0
- data/lib/fluent/command/ca_generate.rb +181 -0
- data/lib/fluent/command/cat.rb +42 -18
- data/lib/fluent/command/debug.rb +12 -10
- data/lib/fluent/command/fluentd.rb +153 -5
- data/lib/fluent/command/plugin_config_formatter.rb +292 -0
- data/lib/fluent/command/plugin_generator.rb +324 -0
- data/lib/fluent/compat/call_super_mixin.rb +67 -0
- data/lib/fluent/compat/detach_process_mixin.rb +33 -0
- data/lib/fluent/compat/exec_util.rb +129 -0
- data/lib/fluent/compat/file_util.rb +54 -0
- data/lib/fluent/compat/filter.rb +68 -0
- data/lib/fluent/compat/formatter.rb +111 -0
- data/lib/fluent/compat/formatter_utils.rb +85 -0
- data/lib/fluent/compat/handle_tag_and_time_mixin.rb +62 -0
- data/lib/fluent/compat/handle_tag_name_mixin.rb +53 -0
- data/lib/fluent/compat/input.rb +49 -0
- data/lib/fluent/compat/output.rb +718 -0
- data/lib/fluent/compat/output_chain.rb +60 -0
- data/lib/fluent/compat/parser.rb +310 -0
- data/lib/fluent/compat/parser_utils.rb +40 -0
- data/lib/fluent/compat/propagate_default.rb +62 -0
- data/lib/fluent/compat/record_filter_mixin.rb +34 -0
- data/lib/fluent/compat/set_tag_key_mixin.rb +50 -0
- data/lib/fluent/compat/set_time_key_mixin.rb +69 -0
- data/lib/fluent/compat/socket_util.rb +165 -0
- data/lib/fluent/compat/string_util.rb +34 -0
- data/lib/fluent/compat/structured_format_mixin.rb +26 -0
- data/lib/fluent/compat/type_converter.rb +90 -0
- data/lib/fluent/config/configure_proxy.rb +210 -62
- data/lib/fluent/config/dsl.rb +12 -5
- data/lib/fluent/config/element.rb +107 -9
- data/lib/fluent/config/literal_parser.rb +9 -3
- data/lib/fluent/config/parser.rb +4 -4
- data/lib/fluent/config/section.rb +51 -14
- data/lib/fluent/config/types.rb +28 -13
- data/lib/fluent/config/v1_parser.rb +3 -5
- data/lib/fluent/config.rb +23 -20
- data/lib/fluent/configurable.rb +79 -21
- data/lib/fluent/counter/base_socket.rb +46 -0
- data/lib/fluent/counter/client.rb +297 -0
- data/lib/fluent/counter/error.rb +86 -0
- data/lib/fluent/counter/mutex_hash.rb +163 -0
- data/lib/fluent/counter/server.rb +273 -0
- data/lib/fluent/counter/store.rb +205 -0
- data/lib/fluent/counter/validator.rb +145 -0
- data/lib/fluent/counter.rb +23 -0
- data/lib/fluent/daemon.rb +15 -0
- data/lib/fluent/engine.rb +102 -65
- data/lib/fluent/env.rb +7 -3
- data/lib/fluent/error.rb +30 -0
- data/lib/fluent/event.rb +197 -21
- data/lib/fluent/event_router.rb +93 -10
- data/lib/fluent/filter.rb +2 -50
- data/lib/fluent/formatter.rb +4 -293
- data/lib/fluent/input.rb +2 -32
- data/lib/fluent/label.rb +10 -2
- data/lib/fluent/load.rb +3 -3
- data/lib/fluent/log.rb +348 -81
- data/lib/fluent/match.rb +37 -36
- data/lib/fluent/mixin.rb +12 -176
- data/lib/fluent/msgpack_factory.rb +62 -0
- data/lib/fluent/output.rb +10 -612
- data/lib/fluent/output_chain.rb +23 -0
- data/lib/fluent/parser.rb +4 -800
- data/lib/fluent/plugin/bare_output.rb +63 -0
- data/lib/fluent/plugin/base.rb +192 -0
- data/lib/fluent/plugin/buf_file.rb +128 -174
- data/lib/fluent/plugin/buf_memory.rb +9 -92
- data/lib/fluent/plugin/buffer/chunk.rb +221 -0
- data/lib/fluent/plugin/buffer/file_chunk.rb +383 -0
- data/lib/fluent/plugin/buffer/memory_chunk.rb +90 -0
- data/lib/fluent/plugin/buffer.rb +779 -0
- data/lib/fluent/plugin/compressable.rb +92 -0
- data/lib/fluent/plugin/exec_util.rb +3 -108
- data/lib/fluent/plugin/file_util.rb +4 -34
- data/lib/fluent/plugin/file_wrapper.rb +120 -0
- data/lib/fluent/plugin/filter.rb +93 -0
- data/lib/fluent/plugin/filter_grep.rb +117 -34
- data/lib/fluent/plugin/filter_parser.rb +85 -62
- data/lib/fluent/plugin/filter_record_transformer.rb +27 -39
- data/lib/fluent/plugin/filter_stdout.rb +15 -12
- data/lib/fluent/plugin/formatter.rb +50 -0
- data/lib/fluent/plugin/formatter_csv.rb +52 -0
- data/lib/fluent/plugin/formatter_hash.rb +33 -0
- data/lib/fluent/plugin/formatter_json.rb +55 -0
- data/lib/fluent/plugin/formatter_ltsv.rb +42 -0
- data/lib/fluent/plugin/formatter_msgpack.rb +33 -0
- data/lib/fluent/plugin/formatter_out_file.rb +51 -0
- data/lib/fluent/plugin/formatter_single_value.rb +34 -0
- data/lib/fluent/plugin/formatter_stdout.rb +76 -0
- data/lib/fluent/plugin/formatter_tsv.rb +38 -0
- data/lib/fluent/plugin/in_debug_agent.rb +17 -6
- data/lib/fluent/plugin/in_dummy.rb +47 -20
- data/lib/fluent/plugin/in_exec.rb +55 -123
- data/lib/fluent/plugin/in_forward.rb +299 -216
- data/lib/fluent/plugin/in_gc_stat.rb +14 -36
- data/lib/fluent/plugin/in_http.rb +204 -91
- data/lib/fluent/plugin/in_monitor_agent.rb +186 -258
- data/lib/fluent/plugin/in_object_space.rb +13 -41
- data/lib/fluent/plugin/in_syslog.rb +112 -134
- data/lib/fluent/plugin/in_tail.rb +408 -745
- data/lib/fluent/plugin/in_tcp.rb +66 -9
- data/lib/fluent/plugin/in_udp.rb +60 -11
- data/lib/fluent/plugin/{in_stream.rb → in_unix.rb} +8 -4
- data/lib/fluent/plugin/input.rb +37 -0
- data/lib/fluent/plugin/multi_output.rb +158 -0
- data/lib/fluent/plugin/out_copy.rb +23 -35
- data/lib/fluent/plugin/out_exec.rb +67 -70
- data/lib/fluent/plugin/out_exec_filter.rb +204 -271
- data/lib/fluent/plugin/out_file.rb +267 -73
- data/lib/fluent/plugin/out_forward.rb +854 -325
- data/lib/fluent/plugin/out_null.rb +42 -9
- data/lib/fluent/plugin/out_relabel.rb +9 -5
- data/lib/fluent/plugin/out_roundrobin.rb +18 -37
- data/lib/fluent/plugin/out_secondary_file.rb +133 -0
- data/lib/fluent/plugin/out_stdout.rb +43 -10
- data/lib/fluent/plugin/out_stream.rb +7 -2
- data/lib/fluent/plugin/output.rb +1498 -0
- data/lib/fluent/plugin/owned_by_mixin.rb +42 -0
- data/lib/fluent/plugin/parser.rb +191 -0
- data/lib/fluent/plugin/parser_apache.rb +28 -0
- data/lib/fluent/plugin/parser_apache2.rb +88 -0
- data/lib/fluent/plugin/parser_apache_error.rb +26 -0
- data/lib/fluent/plugin/parser_csv.rb +39 -0
- data/lib/fluent/plugin/parser_json.rb +94 -0
- data/lib/fluent/plugin/parser_ltsv.rb +49 -0
- data/lib/fluent/plugin/parser_msgpack.rb +50 -0
- data/lib/fluent/plugin/parser_multiline.rb +106 -0
- data/lib/fluent/plugin/parser_nginx.rb +28 -0
- data/lib/fluent/plugin/parser_none.rb +36 -0
- data/lib/fluent/plugin/parser_regexp.rb +68 -0
- data/lib/fluent/plugin/parser_syslog.rb +142 -0
- data/lib/fluent/plugin/parser_tsv.rb +42 -0
- data/lib/fluent/plugin/socket_util.rb +3 -143
- data/lib/fluent/plugin/storage.rb +84 -0
- data/lib/fluent/plugin/storage_local.rb +164 -0
- data/lib/fluent/plugin/string_util.rb +3 -15
- data/lib/fluent/plugin.rb +122 -121
- data/lib/fluent/plugin_helper/cert_option.rb +178 -0
- data/lib/fluent/plugin_helper/child_process.rb +364 -0
- data/lib/fluent/plugin_helper/compat_parameters.rb +333 -0
- data/lib/fluent/plugin_helper/counter.rb +51 -0
- data/lib/fluent/plugin_helper/event_emitter.rb +93 -0
- data/lib/fluent/plugin_helper/event_loop.rb +170 -0
- data/lib/fluent/plugin_helper/extract.rb +104 -0
- data/lib/fluent/plugin_helper/formatter.rb +147 -0
- data/lib/fluent/plugin_helper/http_server/app.rb +79 -0
- data/lib/fluent/plugin_helper/http_server/compat/server.rb +81 -0
- data/lib/fluent/plugin_helper/http_server/compat/webrick_handler.rb +58 -0
- data/lib/fluent/plugin_helper/http_server/methods.rb +35 -0
- data/lib/fluent/plugin_helper/http_server/request.rb +42 -0
- data/lib/fluent/plugin_helper/http_server/router.rb +54 -0
- data/lib/fluent/plugin_helper/http_server/server.rb +87 -0
- data/lib/fluent/plugin_helper/http_server.rb +76 -0
- data/lib/fluent/plugin_helper/inject.rb +151 -0
- data/lib/fluent/plugin_helper/parser.rb +147 -0
- data/lib/fluent/plugin_helper/record_accessor.rb +210 -0
- data/lib/fluent/plugin_helper/retry_state.rb +205 -0
- data/lib/fluent/plugin_helper/server.rb +807 -0
- data/lib/fluent/plugin_helper/socket.rb +250 -0
- data/lib/fluent/plugin_helper/socket_option.rb +80 -0
- data/lib/fluent/plugin_helper/storage.rb +349 -0
- data/lib/fluent/plugin_helper/thread.rb +179 -0
- data/lib/fluent/plugin_helper/timer.rb +92 -0
- data/lib/fluent/plugin_helper.rb +73 -0
- data/lib/fluent/plugin_id.rb +80 -0
- data/lib/fluent/process.rb +3 -489
- data/lib/fluent/registry.rb +52 -10
- data/lib/fluent/root_agent.rb +204 -42
- data/lib/fluent/supervisor.rb +597 -359
- data/lib/fluent/system_config.rb +131 -42
- data/lib/fluent/test/base.rb +6 -54
- data/lib/fluent/test/driver/base.rb +224 -0
- data/lib/fluent/test/driver/base_owned.rb +70 -0
- data/lib/fluent/test/driver/base_owner.rb +135 -0
- data/lib/fluent/test/driver/event_feeder.rb +98 -0
- data/lib/fluent/test/driver/filter.rb +57 -0
- data/lib/fluent/test/driver/formatter.rb +30 -0
- data/lib/fluent/test/driver/input.rb +31 -0
- data/lib/fluent/test/driver/multi_output.rb +53 -0
- data/lib/fluent/test/driver/output.rb +102 -0
- data/lib/fluent/test/driver/parser.rb +30 -0
- data/lib/fluent/test/driver/test_event_router.rb +45 -0
- data/lib/fluent/test/filter_test.rb +0 -1
- data/lib/fluent/test/formatter_test.rb +4 -1
- data/lib/fluent/test/helpers.rb +58 -10
- data/lib/fluent/test/input_test.rb +27 -19
- data/lib/fluent/test/log.rb +79 -0
- data/lib/fluent/test/output_test.rb +28 -39
- data/lib/fluent/test/parser_test.rb +3 -1
- data/lib/fluent/test/startup_shutdown.rb +46 -0
- data/lib/fluent/test.rb +33 -1
- data/lib/fluent/time.rb +450 -1
- data/lib/fluent/timezone.rb +27 -3
- data/lib/fluent/{status.rb → unique_id.rb} +15 -24
- data/lib/fluent/version.rb +1 -1
- data/lib/fluent/winsvc.rb +85 -0
- data/templates/new_gem/Gemfile +3 -0
- data/templates/new_gem/README.md.erb +43 -0
- data/templates/new_gem/Rakefile +13 -0
- data/templates/new_gem/fluent-plugin.gemspec.erb +27 -0
- data/templates/new_gem/lib/fluent/plugin/filter.rb.erb +14 -0
- data/templates/new_gem/lib/fluent/plugin/formatter.rb.erb +14 -0
- data/templates/new_gem/lib/fluent/plugin/input.rb.erb +11 -0
- data/templates/new_gem/lib/fluent/plugin/output.rb.erb +11 -0
- data/templates/new_gem/lib/fluent/plugin/parser.rb.erb +15 -0
- data/templates/new_gem/test/helper.rb.erb +8 -0
- data/templates/new_gem/test/plugin/test_filter.rb.erb +18 -0
- data/templates/new_gem/test/plugin/test_formatter.rb.erb +18 -0
- data/templates/new_gem/test/plugin/test_input.rb.erb +18 -0
- data/templates/new_gem/test/plugin/test_output.rb.erb +18 -0
- data/templates/new_gem/test/plugin/test_parser.rb.erb +18 -0
- data/templates/plugin_config_formatter/param.md-compact.erb +25 -0
- data/templates/plugin_config_formatter/param.md.erb +34 -0
- data/templates/plugin_config_formatter/section.md.erb +12 -0
- data/test/command/test_binlog_reader.rb +346 -0
- data/test/command/test_ca_generate.rb +70 -0
- data/test/command/test_fluentd.rb +901 -0
- data/test/command/test_plugin_config_formatter.rb +276 -0
- data/test/command/test_plugin_generator.rb +92 -0
- data/test/compat/test_calls_super.rb +166 -0
- data/test/compat/test_parser.rb +92 -0
- data/test/config/test_config_parser.rb +126 -2
- data/test/config/test_configurable.rb +946 -187
- data/test/config/test_configure_proxy.rb +424 -74
- data/test/config/test_dsl.rb +11 -11
- data/test/config/test_element.rb +500 -0
- data/test/config/test_literal_parser.rb +8 -0
- data/test/config/test_plugin_configuration.rb +56 -0
- data/test/config/test_section.rb +79 -7
- data/test/config/test_system_config.rb +122 -35
- data/test/config/test_types.rb +38 -0
- data/test/counter/test_client.rb +559 -0
- data/test/counter/test_error.rb +44 -0
- data/test/counter/test_mutex_hash.rb +179 -0
- data/test/counter/test_server.rb +589 -0
- data/test/counter/test_store.rb +258 -0
- data/test/counter/test_validator.rb +137 -0
- data/test/helper.rb +89 -6
- data/test/helpers/fuzzy_assert.rb +89 -0
- data/test/plugin/test_bare_output.rb +118 -0
- data/test/plugin/test_base.rb +115 -0
- data/test/plugin/test_buf_file.rb +823 -460
- data/test/plugin/test_buf_memory.rb +32 -194
- data/test/plugin/test_buffer.rb +1233 -0
- data/test/plugin/test_buffer_chunk.rb +198 -0
- data/test/plugin/test_buffer_file_chunk.rb +844 -0
- data/test/plugin/test_buffer_memory_chunk.rb +338 -0
- data/test/plugin/test_compressable.rb +84 -0
- data/test/plugin/test_filter.rb +357 -0
- data/test/plugin/test_filter_grep.rb +540 -29
- data/test/plugin/test_filter_parser.rb +439 -452
- data/test/plugin/test_filter_record_transformer.rb +123 -166
- data/test/plugin/test_filter_stdout.rb +160 -72
- data/test/plugin/test_formatter_csv.rb +111 -0
- data/test/plugin/test_formatter_hash.rb +35 -0
- data/test/plugin/test_formatter_json.rb +51 -0
- data/test/plugin/test_formatter_ltsv.rb +62 -0
- data/test/plugin/test_formatter_msgpack.rb +28 -0
- data/test/plugin/test_formatter_out_file.rb +95 -0
- data/test/plugin/test_formatter_single_value.rb +38 -0
- data/test/plugin/test_formatter_tsv.rb +68 -0
- data/test/plugin/test_in_debug_agent.rb +24 -1
- data/test/plugin/test_in_dummy.rb +111 -18
- data/test/plugin/test_in_exec.rb +200 -113
- data/test/plugin/test_in_forward.rb +990 -387
- data/test/plugin/test_in_gc_stat.rb +10 -8
- data/test/plugin/test_in_http.rb +600 -224
- data/test/plugin/test_in_monitor_agent.rb +690 -0
- data/test/plugin/test_in_object_space.rb +24 -8
- data/test/plugin/test_in_syslog.rb +154 -215
- data/test/plugin/test_in_tail.rb +1006 -707
- data/test/plugin/test_in_tcp.rb +125 -48
- data/test/plugin/test_in_udp.rb +204 -63
- data/test/plugin/{test_in_stream.rb → test_in_unix.rb} +14 -13
- data/test/plugin/test_input.rb +126 -0
- data/test/plugin/test_metadata.rb +89 -0
- data/test/plugin/test_multi_output.rb +180 -0
- data/test/plugin/test_out_copy.rb +117 -112
- data/test/plugin/test_out_exec.rb +258 -53
- data/test/plugin/test_out_exec_filter.rb +538 -115
- data/test/plugin/test_out_file.rb +865 -178
- data/test/plugin/test_out_forward.rb +998 -210
- data/test/plugin/test_out_null.rb +105 -0
- data/test/plugin/test_out_relabel.rb +28 -0
- data/test/plugin/test_out_roundrobin.rb +36 -29
- data/test/plugin/test_out_secondary_file.rb +458 -0
- data/test/plugin/test_out_stdout.rb +135 -37
- data/test/plugin/test_out_stream.rb +18 -0
- data/test/plugin/test_output.rb +984 -0
- data/test/plugin/test_output_as_buffered.rb +2021 -0
- data/test/plugin/test_output_as_buffered_backup.rb +312 -0
- data/test/plugin/test_output_as_buffered_compress.rb +165 -0
- data/test/plugin/test_output_as_buffered_overflow.rb +250 -0
- data/test/plugin/test_output_as_buffered_retries.rb +911 -0
- data/test/plugin/test_output_as_buffered_secondary.rb +874 -0
- data/test/plugin/test_output_as_standard.rb +374 -0
- data/test/plugin/test_owned_by.rb +35 -0
- data/test/plugin/test_parser.rb +359 -0
- data/test/plugin/test_parser_apache.rb +42 -0
- data/test/plugin/test_parser_apache2.rb +47 -0
- data/test/plugin/test_parser_apache_error.rb +45 -0
- data/test/plugin/test_parser_csv.rb +103 -0
- data/test/plugin/test_parser_json.rb +138 -0
- data/test/plugin/test_parser_labeled_tsv.rb +145 -0
- data/test/plugin/test_parser_multiline.rb +100 -0
- data/test/plugin/test_parser_nginx.rb +88 -0
- data/test/plugin/test_parser_none.rb +52 -0
- data/test/plugin/test_parser_regexp.rb +289 -0
- data/test/plugin/test_parser_syslog.rb +441 -0
- data/test/plugin/test_parser_tsv.rb +122 -0
- data/test/plugin/test_storage.rb +167 -0
- data/test/plugin/test_storage_local.rb +335 -0
- data/test/plugin_helper/data/cert/cert-key.pem +27 -0
- data/test/plugin_helper/data/cert/cert-with-no-newline.pem +19 -0
- data/test/plugin_helper/data/cert/cert.pem +19 -0
- data/test/plugin_helper/http_server/test_app.rb +65 -0
- data/test/plugin_helper/http_server/test_route.rb +32 -0
- data/test/plugin_helper/test_cert_option.rb +16 -0
- data/test/plugin_helper/test_child_process.rb +794 -0
- data/test/plugin_helper/test_compat_parameters.rb +353 -0
- data/test/plugin_helper/test_event_emitter.rb +51 -0
- data/test/plugin_helper/test_event_loop.rb +52 -0
- data/test/plugin_helper/test_extract.rb +194 -0
- data/test/plugin_helper/test_formatter.rb +255 -0
- data/test/plugin_helper/test_http_server_helper.rb +205 -0
- data/test/plugin_helper/test_inject.rb +519 -0
- data/test/plugin_helper/test_parser.rb +264 -0
- data/test/plugin_helper/test_record_accessor.rb +197 -0
- data/test/plugin_helper/test_retry_state.rb +442 -0
- data/test/plugin_helper/test_server.rb +1714 -0
- data/test/plugin_helper/test_storage.rb +542 -0
- data/test/plugin_helper/test_thread.rb +164 -0
- data/test/plugin_helper/test_timer.rb +132 -0
- data/test/scripts/exec_script.rb +0 -6
- data/test/scripts/fluent/plugin/formatter1/formatter_test1.rb +7 -0
- data/test/scripts/fluent/plugin/formatter2/formatter_test2.rb +7 -0
- data/test/scripts/fluent/plugin/out_test.rb +23 -15
- data/test/scripts/fluent/plugin/out_test2.rb +80 -0
- data/test/test_clock.rb +164 -0
- data/test/test_config.rb +16 -7
- data/test/test_configdsl.rb +2 -2
- data/test/test_event.rb +360 -13
- data/test/test_event_router.rb +108 -11
- data/test/test_event_time.rb +199 -0
- data/test/test_filter.rb +48 -6
- data/test/test_formatter.rb +11 -391
- data/test/test_input.rb +1 -1
- data/test/test_log.rb +591 -31
- data/test/test_mixin.rb +1 -1
- data/test/test_output.rb +121 -185
- data/test/test_plugin.rb +251 -0
- data/test/test_plugin_classes.rb +177 -10
- data/test/test_plugin_helper.rb +81 -0
- data/test/test_plugin_id.rb +101 -0
- data/test/test_process.rb +8 -42
- data/test/test_root_agent.rb +766 -21
- data/test/test_supervisor.rb +481 -0
- data/test/test_test_drivers.rb +135 -0
- data/test/test_time_formatter.rb +282 -0
- data/test/test_time_parser.rb +231 -0
- data/test/test_unique_id.rb +47 -0
- metadata +454 -60
- data/COPYING +0 -14
- data/ChangeLog +0 -666
- data/lib/fluent/buffer.rb +0 -365
- data/lib/fluent/plugin/in_status.rb +0 -76
- data/test/plugin/test_in_status.rb +0 -38
- data/test/test_buffer.rb +0 -624
- data/test/test_parser.rb +0 -1305
@@ -0,0 +1,1498 @@
|
|
1
|
+
#
|
2
|
+
# Fluentd
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
#
|
16
|
+
|
17
|
+
require 'fluent/error'
|
18
|
+
require 'fluent/plugin/base'
|
19
|
+
require 'fluent/plugin/buffer'
|
20
|
+
require 'fluent/plugin_helper/record_accessor'
|
21
|
+
require 'fluent/log'
|
22
|
+
require 'fluent/plugin_id'
|
23
|
+
require 'fluent/plugin_helper'
|
24
|
+
require 'fluent/timezone'
|
25
|
+
require 'fluent/unique_id'
|
26
|
+
require 'fluent/clock'
|
27
|
+
|
28
|
+
require 'time'
|
29
|
+
require 'monitor'
|
30
|
+
|
31
|
+
module Fluent
|
32
|
+
module Plugin
|
33
|
+
class Output < Base
|
34
|
+
include PluginId
|
35
|
+
include PluginLoggerMixin
|
36
|
+
include PluginHelper::Mixin
|
37
|
+
include UniqueId::Mixin
|
38
|
+
|
39
|
+
helpers_internal :thread, :retry_state
|
40
|
+
|
41
|
+
CHUNK_KEY_PATTERN = /^[-_.@a-zA-Z0-9]+$/
|
42
|
+
CHUNK_KEY_PLACEHOLDER_PATTERN = /\$\{[-_.@$a-zA-Z0-9]+\}/
|
43
|
+
CHUNK_TAG_PLACEHOLDER_PATTERN = /\$\{(tag(?:\[-?\d+\])?)\}/
|
44
|
+
CHUNK_ID_PLACEHOLDER_PATTERN = /\$\{chunk_id\}/
|
45
|
+
|
46
|
+
CHUNKING_FIELD_WARN_NUM = 4
|
47
|
+
|
48
|
+
config_param :time_as_integer, :bool, default: false
|
49
|
+
desc 'The threshold to show slow flush logs'
|
50
|
+
config_param :slow_flush_log_threshold, :float, default: 20.0
|
51
|
+
|
52
|
+
# `<buffer>` and `<secondary>` sections are available only when '#format' and '#write' are implemented
|
53
|
+
config_section :buffer, param_name: :buffer_config, init: true, required: false, multi: false, final: true do
|
54
|
+
config_argument :chunk_keys, :array, value_type: :string, default: []
|
55
|
+
config_param :@type, :string, default: 'memory', alias: :type
|
56
|
+
|
57
|
+
config_param :timekey, :time, default: nil # range size to be used: `time.to_i / @timekey`
|
58
|
+
config_param :timekey_wait, :time, default: 600
|
59
|
+
# These are for #extract_placeholders
|
60
|
+
config_param :timekey_use_utc, :bool, default: false # default is localtime
|
61
|
+
config_param :timekey_zone, :string, default: Time.now.strftime('%z') # e.g., "-0700" or "Asia/Tokyo"
|
62
|
+
|
63
|
+
desc 'If true, plugin will try to flush buffer just before shutdown.'
|
64
|
+
config_param :flush_at_shutdown, :bool, default: nil # change default by buffer_plugin.persistent?
|
65
|
+
|
66
|
+
desc 'How to enqueue chunks to be flushed. "interval" flushes per flush_interval, "immediate" flushes just after event arrival.'
|
67
|
+
config_param :flush_mode, :enum, list: [:default, :lazy, :interval, :immediate], default: :default
|
68
|
+
config_param :flush_interval, :time, default: 60, desc: 'The interval between buffer chunk flushes.'
|
69
|
+
|
70
|
+
config_param :flush_thread_count, :integer, default: 1, desc: 'The number of threads to flush the buffer.'
|
71
|
+
|
72
|
+
config_param :flush_thread_interval, :float, default: 1.0, desc: 'Seconds to sleep between checks for buffer flushes in flush threads.'
|
73
|
+
config_param :flush_thread_burst_interval, :float, default: 1.0, desc: 'Seconds to sleep between flushes when many buffer chunks are queued.'
|
74
|
+
|
75
|
+
config_param :delayed_commit_timeout, :time, default: 60, desc: 'Seconds of timeout for buffer chunks to be committed by plugins later.'
|
76
|
+
|
77
|
+
config_param :overflow_action, :enum, list: [:throw_exception, :block, :drop_oldest_chunk], default: :throw_exception, desc: 'The action when the size of buffer exceeds the limit.'
|
78
|
+
|
79
|
+
config_param :retry_forever, :bool, default: false, desc: 'If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever.'
|
80
|
+
config_param :retry_timeout, :time, default: 72 * 60 * 60, desc: 'The maximum seconds to retry to flush while failing, until plugin discards buffer chunks.'
|
81
|
+
# 72hours == 17 times with exponential backoff (not to change default behavior)
|
82
|
+
config_param :retry_max_times, :integer, default: nil, desc: 'The maximum number of times to retry to flush while failing.'
|
83
|
+
|
84
|
+
config_param :retry_secondary_threshold, :float, default: 0.8, desc: 'ratio of retry_timeout to switch to use secondary while failing.'
|
85
|
+
# exponential backoff sequence will be initialized at the time of this threshold
|
86
|
+
|
87
|
+
desc 'How to wait next retry to flush buffer.'
|
88
|
+
config_param :retry_type, :enum, list: [:exponential_backoff, :periodic], default: :exponential_backoff
|
89
|
+
### Periodic -> fixed :retry_wait
|
90
|
+
### Exponential backoff: k is number of retry times
|
91
|
+
# c: constant factor, @retry_wait
|
92
|
+
# b: base factor, @retry_exponential_backoff_base
|
93
|
+
# k: times
|
94
|
+
# total retry time: c + c * b^1 + (...) + c*b^k = c*b^(k+1) - 1
|
95
|
+
config_param :retry_wait, :time, default: 1, desc: 'Seconds to wait before next retry to flush, or constant factor of exponential backoff.'
|
96
|
+
config_param :retry_exponential_backoff_base, :float, default: 2, desc: 'The base number of exponential backoff for retries.'
|
97
|
+
config_param :retry_max_interval, :time, default: nil, desc: 'The maximum interval seconds for exponential backoff between retries while failing.'
|
98
|
+
|
99
|
+
config_param :retry_randomize, :bool, default: true, desc: 'If true, output plugin will retry after randomized interval not to do burst retries.'
|
100
|
+
config_param :disable_chunk_backup, :bool, default: false, desc: 'If true, chunks are thrown away when unrecoverable error happens'
|
101
|
+
end
|
102
|
+
|
103
|
+
config_section :secondary, param_name: :secondary_config, required: false, multi: false, final: true do
|
104
|
+
config_param :@type, :string, default: nil, alias: :type
|
105
|
+
config_section :buffer, required: false, multi: false do
|
106
|
+
# dummy to detect invalid specification for here
|
107
|
+
end
|
108
|
+
config_section :secondary, required: false, multi: false do
|
109
|
+
# dummy to detect invalid specification for here
|
110
|
+
end
|
111
|
+
end
|
112
|
+
|
113
|
+
def process(tag, es)
|
114
|
+
raise NotImplementedError, "BUG: output plugins MUST implement this method"
|
115
|
+
end
|
116
|
+
|
117
|
+
def write(chunk)
|
118
|
+
raise NotImplementedError, "BUG: output plugins MUST implement this method"
|
119
|
+
end
|
120
|
+
|
121
|
+
def try_write(chunk)
|
122
|
+
raise NotImplementedError, "BUG: output plugins MUST implement this method"
|
123
|
+
end
|
124
|
+
|
125
|
+
def format(tag, time, record)
|
126
|
+
# standard msgpack_event_stream chunk will be used if this method is not implemented in plugin subclass
|
127
|
+
raise NotImplementedError, "BUG: output plugins MUST implement this method"
|
128
|
+
end
|
129
|
+
|
130
|
+
def formatted_to_msgpack_binary?
|
131
|
+
# To indicate custom format method (#format) returns msgpack binary or not.
|
132
|
+
# If #format returns msgpack binary, override this method to return true.
|
133
|
+
false
|
134
|
+
end
|
135
|
+
|
136
|
+
# Compatibility for existing plugins
|
137
|
+
def formatted_to_msgpack_binary
|
138
|
+
formatted_to_msgpack_binary?
|
139
|
+
end
|
140
|
+
|
141
|
+
def prefer_buffered_processing
|
142
|
+
# override this method to return false only when all of these are true:
|
143
|
+
# * plugin has both implementation for buffered and non-buffered methods
|
144
|
+
# * plugin is expected to work as non-buffered plugin if no `<buffer>` sections specified
|
145
|
+
true
|
146
|
+
end
|
147
|
+
|
148
|
+
def prefer_delayed_commit
|
149
|
+
# override this method to decide which is used of `write` or `try_write` if both are implemented
|
150
|
+
true
|
151
|
+
end
|
152
|
+
|
153
|
+
def multi_workers_ready?
|
154
|
+
false
|
155
|
+
end
|
156
|
+
|
157
|
+
# Internal states
|
158
|
+
FlushThreadState = Struct.new(:thread, :next_clock, :mutex, :cond_var)
|
159
|
+
DequeuedChunkInfo = Struct.new(:chunk_id, :time, :timeout) do
|
160
|
+
def expired?
|
161
|
+
time + timeout < Time.now
|
162
|
+
end
|
163
|
+
end
|
164
|
+
|
165
|
+
attr_reader :as_secondary, :delayed_commit, :delayed_commit_timeout, :timekey_zone
|
166
|
+
attr_reader :num_errors, :emit_count, :emit_records, :write_count, :rollback_count
|
167
|
+
|
168
|
+
# for tests
|
169
|
+
attr_reader :buffer, :retry, :secondary, :chunk_keys, :chunk_key_accessors, :chunk_key_time, :chunk_key_tag
|
170
|
+
attr_accessor :output_enqueue_thread_waiting, :dequeued_chunks, :dequeued_chunks_mutex
|
171
|
+
# output_enqueue_thread_waiting: for test of output.rb itself
|
172
|
+
attr_accessor :retry_for_error_chunk # if true, error flush will be retried even if under_plugin_development is true
|
173
|
+
|
174
|
+
def initialize
|
175
|
+
super
|
176
|
+
@counters_monitor = Monitor.new
|
177
|
+
@buffering = false
|
178
|
+
@delayed_commit = false
|
179
|
+
@as_secondary = false
|
180
|
+
@primary_instance = nil
|
181
|
+
|
182
|
+
# TODO: well organized counters
|
183
|
+
@num_errors = 0
|
184
|
+
@emit_count = 0
|
185
|
+
@emit_records = 0
|
186
|
+
@write_count = 0
|
187
|
+
@rollback_count = 0
|
188
|
+
@flush_time_count = 0
|
189
|
+
@slow_flush_count = 0
|
190
|
+
|
191
|
+
# How to process events is decided here at once, but it will be decided in delayed way on #configure & #start
|
192
|
+
if implement?(:synchronous)
|
193
|
+
if implement?(:buffered) || implement?(:delayed_commit)
|
194
|
+
@buffering = nil # do #configure or #start to determine this for full-featured plugins
|
195
|
+
else
|
196
|
+
@buffering = false
|
197
|
+
end
|
198
|
+
else
|
199
|
+
@buffering = true
|
200
|
+
end
|
201
|
+
@custom_format = implement?(:custom_format)
|
202
|
+
@enable_msgpack_streamer = false # decided later
|
203
|
+
|
204
|
+
@buffer = nil
|
205
|
+
@secondary = nil
|
206
|
+
@retry = nil
|
207
|
+
@dequeued_chunks = nil
|
208
|
+
@dequeued_chunks_mutex = nil
|
209
|
+
@output_enqueue_thread = nil
|
210
|
+
@output_flush_threads = nil
|
211
|
+
|
212
|
+
@simple_chunking = nil
|
213
|
+
@chunk_keys = @chunk_key_accessors = @chunk_key_time = @chunk_key_tag = nil
|
214
|
+
@flush_mode = nil
|
215
|
+
@timekey_zone = nil
|
216
|
+
|
217
|
+
@retry_for_error_chunk = false
|
218
|
+
end
|
219
|
+
|
220
|
+
def acts_as_secondary(primary)
|
221
|
+
@as_secondary = true
|
222
|
+
@primary_instance = primary
|
223
|
+
@chunk_keys = @primary_instance.chunk_keys || []
|
224
|
+
@chunk_key_tag = @primary_instance.chunk_key_tag || false
|
225
|
+
if @primary_instance.chunk_key_time
|
226
|
+
@chunk_key_time = @primary_instance.chunk_key_time
|
227
|
+
@timekey_zone = @primary_instance.timekey_zone
|
228
|
+
@output_time_formatter_cache = {}
|
229
|
+
end
|
230
|
+
self.context_router = primary.context_router
|
231
|
+
|
232
|
+
singleton_class.module_eval do
|
233
|
+
define_method(:commit_write){ |chunk_id| @primary_instance.commit_write(chunk_id, delayed: delayed_commit, secondary: true) }
|
234
|
+
define_method(:rollback_write){ |chunk_id, update_retry: true| @primary_instance.rollback_write(chunk_id, update_retry) }
|
235
|
+
end
|
236
|
+
end
|
237
|
+
|
238
|
+
# Configure the output plugin from its parsed configuration.
#
# Responsibilities (in order):
# * verify the plugin implements at least one emit style (#process, #write, #try_write)
# * decide buffered vs non-buffered mode (@buffering may stay nil: decided lazily in #start)
# * parse chunk keys ('time'/'tag'/record accessors) and validate them
# * derive timekey settings, flush mode, and instantiate/configure the buffer plugin
# * set up the optional <secondary> output
#
# @param conf [Fluent::Config::Element] plugin configuration
# @return [self]
# @raise [Fluent::ConfigError] on invalid combinations of sections/parameters
def configure(conf)
  unless implement?(:synchronous) || implement?(:buffered) || implement?(:delayed_commit)
    raise "BUG: output plugin must implement some methods. see developer documents."
  end

  # Captured before `super` so we can distinguish explicit user configuration
  # from defaults filled in by config_section.
  has_buffer_section = (conf.elements(name: 'buffer').size > 0)
  has_flush_interval = conf.has_key?('flush_interval')

  super

  if has_buffer_section
    unless implement?(:buffered) || implement?(:delayed_commit)
      raise Fluent::ConfigError, "<buffer> section is configured, but plugin '#{self.class}' doesn't support buffering"
    end
    @buffering = true
  else # no buffer sections
    if implement?(:synchronous)
      if !implement?(:buffered) && !implement?(:delayed_commit)
        if @as_secondary
          raise Fluent::ConfigError, "secondary plugin '#{self.class}' must support buffering, but doesn't."
        end
        @buffering = false
      else
        if @as_secondary
          # secondary plugin always works as buffered plugin without buffer instance
          @buffering = true
        else
          # @buffering.nil? shows that enabling buffering or not will be decided in lazy way in #start
          @buffering = nil
        end
      end
    else # buffered or delayed_commit is supported by `unless` of first line in this method
      @buffering = true
    end
  end

  if @as_secondary
    if !@buffering && !@buffering.nil?
      raise Fluent::ConfigError, "secondary plugin '#{self.class}' must support buffering, but doesn't"
    end
  end

  if (@buffering || @buffering.nil?) && !@as_secondary
    # When @buffering.nil?, @buffer_config was initialized with default value for all parameters.
    # If so, this configuration MUST success.
    @chunk_keys = @buffer_config.chunk_keys.dup
    # 'time' and 'tag' are handled specially; remaining keys are record accessors.
    @chunk_key_time = !!@chunk_keys.delete('time')
    @chunk_key_tag = !!@chunk_keys.delete('tag')
    if @chunk_keys.any? { |key|
        begin
          k = Fluent::PluginHelper::RecordAccessor::Accessor.parse_parameter(key)
          if k.is_a?(String)
            k !~ CHUNK_KEY_PATTERN
          else
            if key.start_with?('$[')
              raise Fluent::ConfigError, "in chunk_keys: bracket notation is not allowed"
            else
              false
            end
          end
        rescue => e
          raise Fluent::ConfigError, "in chunk_keys: #{e.message}"
        end
      }
      raise Fluent::ConfigError, "chunk_keys specification includes invalid char"
    else
      @chunk_key_accessors = Hash[@chunk_keys.map { |key| [key.to_sym, Fluent::PluginHelper::RecordAccessor::Accessor.new(key)] }]
    end

    if @chunk_key_time
      raise Fluent::ConfigError, "<buffer ...> argument includes 'time', but timekey is not configured" unless @buffer_config.timekey
      Fluent::Timezone.validate!(@buffer_config.timekey_zone)
      @timekey_zone = @buffer_config.timekey_use_utc ? '+0000' : @buffer_config.timekey_zone
      @timekey = @buffer_config.timekey
      @timekey_use_utc = @buffer_config.timekey_use_utc
      # @offset may be a fixed integer or a callable (for DST-aware zones).
      @offset = Fluent::Timezone.utc_offset(@timekey_zone)
      @calculate_offset = @offset.respond_to?(:call) ? @offset : nil
      @output_time_formatter_cache = {}
    end

    if (@chunk_key_tag ? 1 : 0) + @chunk_keys.size >= CHUNKING_FIELD_WARN_NUM
      log.warn "many chunk keys specified, and it may cause too many chunks on your system."
    end

    # no chunk keys or only tags (chunking can be done without iterating event stream)
    @simple_chunking = !@chunk_key_time && @chunk_keys.empty?

    @flush_mode = @buffer_config.flush_mode
    if @flush_mode == :default
      if has_flush_interval
        log.info "'flush_interval' is configured at out side of <buffer>. 'flush_mode' is set to 'interval' to keep existing behaviour"
        @flush_mode = :interval
      else
        @flush_mode = (@chunk_key_time ? :lazy : :interval)
      end
    end

    buffer_type = @buffer_config[:@type]
    buffer_conf = conf.elements(name: 'buffer').first || Fluent::Config::Element.new('buffer', '', {}, [])
    @buffer = Plugin.new_buffer(buffer_type, parent: self)
    @buffer.configure(buffer_conf)

    @flush_at_shutdown = @buffer_config.flush_at_shutdown
    if @flush_at_shutdown.nil?
      @flush_at_shutdown = if @buffer.persistent?
                             false
                           else
                             true # flush_at_shutdown is true in default for on-memory buffer
                           end
    elsif !@flush_at_shutdown && !@buffer.persistent?
      buf_type = Plugin.lookup_type_from_class(@buffer.class)
      log.warn "'flush_at_shutdown' is false, and buffer plugin '#{buf_type}' is not persistent buffer."
      log.warn "your configuration will lose buffered data at shutdown. please confirm your configuration again."
    end

    if (@flush_mode != :interval) && buffer_conf.has_key?('flush_interval')
      if buffer_conf.has_key?('flush_mode')
        raise Fluent::ConfigError, "'flush_interval' can't be specified when 'flush_mode' is not 'interval' explicitly: '#{@flush_mode}'"
      else
        log.warn "'flush_interval' is ignored because default 'flush_mode' is not 'interval': '#{@flush_mode}'"
      end
    end

    if @buffer.queued_chunks_limit_size.nil?
      @buffer.queued_chunks_limit_size = @buffer_config.flush_thread_count
    end
  end

  if @secondary_config
    raise Fluent::ConfigError, "Invalid <secondary> section for non-buffered plugin" unless @buffering
    raise Fluent::ConfigError, "<secondary> section cannot have <buffer> section" if @secondary_config.buffer
    raise Fluent::ConfigError, "<secondary> section cannot have <secondary> section" if @secondary_config.secondary
    if @buffer_config.retry_forever
      log.warn "<secondary> with 'retry_forever', only unrecoverable errors are moved to secondary"
    end

    secondary_type = @secondary_config[:@type]
    unless secondary_type
      secondary_type = conf['@type'] # primary plugin type
    end
    secondary_conf = conf.elements(name: 'secondary').first
    @secondary = Plugin.new_output(secondary_type)
    unless @secondary.respond_to?(:acts_as_secondary)
      raise Fluent::ConfigError, "Failed to setup secondary plugin in '#{conf['@type']}'. '#{secondary_type}' plugin in not allowed due to non buffered output"
    end
    @secondary.acts_as_secondary(self)
    @secondary.configure(secondary_conf)
    if (self.class != @secondary.class) && (@custom_format || @secondary.implement?(:custom_format))
      log.warn "secondary type should be same with primary one", primary: self.class.to_s, secondary: @secondary.class.to_s
    end
  else
    @secondary = nil
  end

  self
end
|
394
|
+
|
395
|
+
# Start the output plugin.
#
# Finalizes the lazily-decided buffering mode (via #prefer_buffered_processing),
# rebinds #emit_events to the proper implementation on this instance's
# singleton class, and — for primary buffered plugins — starts the buffer,
# the flush threads, and (when needed) the enqueue thread.
def start
  super

  if @buffering.nil?
    # Mode was deferred in #configure; ask the plugin now.
    @buffering = prefer_buffered_processing
    if !@buffering && @buffer
      @buffer.terminate # it's not started, so terminate will be enough
      # At here, this plugin works as non-buffered plugin.
      # Un-assign @buffer not to show buffering metrics (e.g., in_monitor_agent)
      @buffer = nil
    end
  end

  if @buffering
    # Bind emit_events to the buffered path on this instance only.
    m = method(:emit_buffered)
    singleton_class.module_eval do
      define_method(:emit_events, m)
    end

    @custom_format = implement?(:custom_format)
    @enable_msgpack_streamer = @custom_format ? formatted_to_msgpack_binary : true
    # If both sync-buffered (#write) and delayed (#try_write) are implemented,
    # let the plugin choose; otherwise delayed commit follows #try_write presence.
    @delayed_commit = if implement?(:buffered) && implement?(:delayed_commit)
                        prefer_delayed_commit
                      else
                        implement?(:delayed_commit)
                      end
    @delayed_commit_timeout = @buffer_config.delayed_commit_timeout
  else # !@buffering
    m = method(:emit_sync)
    singleton_class.module_eval do
      define_method(:emit_events, m)
    end
  end

  # Secondary instances share the primary's buffer/threads, so only a
  # primary buffered plugin starts its own machinery here.
  if @buffering && !@as_secondary
    @retry = nil
    @retry_mutex = Mutex.new

    @buffer.start

    @output_enqueue_thread = nil
    @output_enqueue_thread_running = true

    @output_flush_threads = []
    @output_flush_threads_mutex = Mutex.new
    @output_flush_threads_running = true

    # mainly for test: detect enqueue works as code below:
    #   @output.interrupt_flushes
    #   # emits
    #   @output.enqueue_thread_wait
    @output_flush_interrupted = false
    @output_enqueue_thread_mutex = Mutex.new
    @output_enqueue_thread_waiting = false

    @dequeued_chunks = []
    @dequeued_chunks_mutex = Mutex.new

    # Spawn one flush thread per flush_thread_count.
    @buffer_config.flush_thread_count.times do |i|
      thread_title = "flush_thread_#{i}".to_sym
      thread_state = FlushThreadState.new(nil, nil, Mutex.new, ConditionVariable.new)
      thread = thread_create(thread_title) do
        flush_thread_run(thread_state)
      end
      thread_state.thread = thread
      @output_flush_threads_mutex.synchronize do
        @output_flush_threads << thread_state
      end
    end
    @output_flush_thread_current_position = 0

    # The enqueue thread is only needed for interval/timekey based flushing,
    # and is suppressed while running under plugin test drivers.
    if !@under_plugin_development && (@flush_mode == :interval || @chunk_key_time)
      @output_enqueue_thread = thread_create(:enqueue_thread, &method(:enqueue_thread_run))
    end
  end
  @secondary.start if @secondary
end
|
472
|
+
|
473
|
+
# Run the after_start lifecycle phase, then propagate it to the
# secondary output when one is configured.
def after_start
  super
  unless @secondary.nil?
    @secondary.after_start
  end
end
|
477
|
+
|
478
|
+
# Stop the secondary output and the buffer (when buffering with an
# actual buffer instance), then run the common stop phase.
def stop
  if @secondary
    @secondary.stop
  end
  if @buffering && @buffer
    @buffer.stop
  end

  super
end
|
484
|
+
|
485
|
+
# Pre-shutdown phase: optionally force a final flush, notify the buffer,
# and stop the enqueue thread so no new chunks are enqueued after this point.
def before_shutdown
  @secondary.before_shutdown if @secondary

  if @buffering && @buffer
    if @flush_at_shutdown
      force_flush
    end
    @buffer.before_shutdown
    # Need to ensure to stop enqueueing ... after #shutdown, we cannot write any data
    @output_enqueue_thread_running = false
    if @output_enqueue_thread && @output_enqueue_thread.alive?
      # Wake it so it notices the flag, then wait for it to exit.
      @output_enqueue_thread.wakeup
      @output_enqueue_thread.join
    end
  end

  super
end
|
503
|
+
|
504
|
+
# Shut down the secondary output and the buffer (when buffering with an
# actual buffer instance), then run the common shutdown phase.
def shutdown
  unless @secondary.nil?
    @secondary.shutdown
  end
  if @buffering && @buffer
    @buffer.shutdown
  end

  super
end
|
510
|
+
|
511
|
+
# Post-shutdown phase: roll back still-dequeued chunks, notify the buffer,
# then wake up and join every flush thread so they terminate cleanly.
def after_shutdown
  try_rollback_all if @buffering && !@as_secondary # rollback regardless with @delayed_commit, because secondary may do it
  @secondary.after_shutdown if @secondary

  if @buffering && @buffer
    @buffer.after_shutdown

    @output_flush_threads_running = false
    if @output_flush_threads && !@output_flush_threads.empty?
      @output_flush_threads.each do |state|
        # to wakeup thread and make it to stop by itself
        state.mutex.synchronize {
          if state.thread && state.thread.status
            # next_clock = 0 makes the thread's wait expire immediately.
            state.next_clock = 0
            state.cond_var.signal
          end
        }
        Thread.pass
        state.thread.join
      end
    end
  end

  super
end
|
536
|
+
|
537
|
+
# Close the buffer (when buffering with an actual buffer instance) and
# the secondary output, then run the common close phase.
def close
  if @buffering && @buffer
    @buffer.close
  end
  unless @secondary.nil?
    @secondary.close
  end

  super
end
|
543
|
+
|
544
|
+
# Terminate the buffer (when buffering with an actual buffer instance)
# and the secondary output, then run the common terminate phase.
def terminate
  if @buffering && @buffer
    @buffer.terminate
  end
  unless @secondary.nil?
    @secondary.terminate
  end

  super
end
|
550
|
+
|
551
|
+
# Whether a v0.12-style plugin supports the given feature.
# In this base class, every known feature answers false; subclasses for
# v0.12 compatibility override this. Unknown features raise ArgumentError.
def support_in_v12_style?(feature)
  # for plugins written in v0.12 styles
  case feature
  when :synchronous, :buffered, :delayed_commit, :custom_format
    false
  else
    raise ArgumentError, "unknown feature: #{feature}"
  end
end
|
562
|
+
|
563
|
+
# Whether this plugin class implements the given emit feature, detected by
# which methods the concrete class defines directly (not inherited):
#   :synchronous    -> #process
#   :buffered       -> #write
#   :delayed_commit -> #try_write
#   :custom_format  -> #format
# v0.12-style support is also honored via #support_in_v12_style?.
def implement?(feature)
  own_methods = self.class.instance_methods(false)
  case feature
  when :synchronous
    own_methods.include?(:process) || support_in_v12_style?(:synchronous)
  when :buffered
    own_methods.include?(:write) || support_in_v12_style?(:buffered)
  when :delayed_commit
    own_methods.include?(:try_write)
  when :custom_format
    own_methods.include?(:format) || support_in_v12_style?(:custom_format)
  else
    raise ArgumentError, "Unknown feature for output plugin: #{feature}"
  end
end
|
574
|
+
|
575
|
+
# Validate every placeholder in +str+ against the current chunking setup,
# raising on the first violation.
def placeholder_validate!(name, str)
  placeholder_validators(name, str).each(&:validate!)
end
|
580
|
+
|
581
|
+
# Build the list of PlaceholderValidator instances needed for +str+.
# A validator is added per category (time/tag/keys) when either the string
# contains that kind of placeholder or the chunking configuration requires it.
def placeholder_validators(name, str, time_key = (@chunk_key_time && @buffer_config.timekey), tag_key = @chunk_key_tag, chunk_keys = @chunk_keys)
  validators = []

  sec, title, example = get_placeholders_time(str)
  if sec || time_key
    validators << PlaceholderValidator.new(name, str, :time, {sec: sec, title: title, example: example, timekey: time_key})
  end

  tag_parts = get_placeholders_tag(str)
  if tag_key || !tag_parts.empty?
    validators << PlaceholderValidator.new(name, str, :tag, {parts: tag_parts, tagkey: tag_key})
  end

  key_names = get_placeholders_keys(str)
  if chunk_keys && !chunk_keys.empty? || !key_names.empty?
    validators << PlaceholderValidator.new(name, str, :keys, {keys: key_names, chunkkeys: chunk_keys})
  end

  validators
end
|
601
|
+
|
602
|
+
# Validates that a configuration parameter's placeholders are consistent
# with the chunking configuration. One instance checks one category:
# :time (strftime placeholders vs timekey), :tag (${tag...} vs tag chunking),
# or :keys (${key} placeholders vs chunk keys).
class PlaceholderValidator
  attr_reader :name, :string, :type, :argument

  # @param name [String] parameter name (used in error messages)
  # @param str [String] the parameter value being validated
  # @param type [Symbol] :time, :tag or :keys
  # @param arg [Hash] category-specific data (see validate_* methods)
  def initialize(name, str, type, arg)
    @name = name
    @string = str
    @type = type
    unless [:time, :tag, :keys].include?(@type)
      raise ArgumentError, "invalid type:#{type}"
    end
    @argument = arg
  end

  def time?
    @type == :time
  end

  def tag?
    @type == :tag
  end

  def keys?
    @type == :keys
  end

  # Dispatch to the category-specific check; raises Fluent::ConfigError
  # when the placeholder usage is inconsistent.
  def validate!
    if time?
      validate_time!
    elsif tag?
      validate_tag!
    elsif keys?
      validate_keys!
    end
  end

  # Time placeholders must exist iff a timekey is configured, and must be
  # at least as fine-grained as the timekey.
  def validate_time!
    sec = @argument[:sec]
    title = @argument[:title]
    example = @argument[:example]
    timekey = @argument[:timekey]
    if !sec && timekey
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have timestamp placeholders for timekey #{timekey.to_i}"
    end
    if sec && !timekey
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' has timestamp placeholders, but chunk key 'time' is not configured"
    end
    if sec && timekey && timekey < sec
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have timestamp placeholder for #{title}('#{example}') for timekey #{timekey.to_i}"
    end
  end

  # Tag placeholders must exist iff 'tag' is a chunk key.
  def validate_tag!
    tag_parts = @argument[:parts]
    tagkey = @argument[:tagkey]
    if tagkey && tag_parts.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have tag placeholder"
    end
    if !tagkey && !tag_parts.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' has tag placeholders, but chunk key 'tag' is not configured"
    end
  end

  # ${key} placeholders and configured chunk keys must match exactly.
  def validate_keys!
    placeholder_keys = @argument[:keys]
    chunk_keys = @argument[:chunkkeys]
    missing = chunk_keys - placeholder_keys
    unless missing.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have enough placeholders for keys #{missing.sort.join(',')}"
    end
    extra = placeholder_keys - chunk_keys
    unless extra.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' has placeholders, but chunk keys doesn't have keys #{extra.sort.join(',')}"
    end
  end
end
|
673
|
+
|
674
|
+
# Granularity thresholds for detecting the finest strftime placeholder in a
# template: [seconds, human-readable title, example format directive].
TIME_KEY_PLACEHOLDER_THRESHOLDS = [
  [1, :second, '%S'],
  [60, :minute, '%M'],
  [3600, :hour, '%H'],
  [86400, :day, '%d'],
]
TIMESTAMP_CHECK_BASE_TIME = Time.parse("2016-01-01 00:00:00 UTC")
# it's not validated to use timekey larger than 1 day
#
# Detect the finest time granularity expressed by strftime placeholders in
# +str+ by formatting a fixed base time shifted by each threshold and
# checking whether the output changes.
# @return [Array(Integer, Symbol, String), nil] the matching threshold
#   triple, or nil when the string has no time placeholders (up to 1 day).
def get_placeholders_time(str)
  base_formatted = TIMESTAMP_CHECK_BASE_TIME.strftime(str)
  TIME_KEY_PLACEHOLDER_THRESHOLDS.find { |sec, _title, _example|
    (TIMESTAMP_CHECK_BASE_TIME + sec).strftime(str) != base_formatted
  }
end
|
690
|
+
|
691
|
+
# -1 means whole tag
|
692
|
+
# Collect tag-part indices referenced by ${tag} / ${tag[N]} placeholders in
# +str+. ${tag} is represented as -1 (the whole tag); ${tag[N]} yields N
# (negative indices allowed). The result is sorted ascending.
def get_placeholders_tag(str)
  # [["tag"],["tag[0]"]]
  placeholders = str.scan(CHUNK_TAG_PLACEHOLDER_PATTERN).map(&:first)
  indices = placeholders.map { |ph|
    if ph == "tag"
      -1
    elsif ph =~ /^tag\[(-?\d+)\]$/
      $1.to_i
    end
  }
  indices.compact.sort
end
|
704
|
+
|
705
|
+
# Extract the chunk-key names used as ${key} placeholders in +str+,
# excluding the special "tag" and "chunk_id" placeholders, sorted.
def get_placeholders_keys(str)
  names = str.scan(CHUNK_KEY_PLACEHOLDER_PATTERN).map { |ph| ph[2..-2] }
  names.reject { |s| s == "tag" || s == 'chunk_id' }.sort
end
|
708
|
+
|
709
|
+
# TODO: optimize this code
|
710
|
+
# Replace placeholders in +str+ using a chunk's metadata:
# strftime time placeholders (via the configured timekey zone),
# ${tag}/${tag[N]} placeholders, ${chunk_key} placeholders, and ${chunk_id}.
#
# @param str [String] template string
# @param chunk [Fluent::Plugin::Buffer::Chunk, Object] a chunk, or (legacy
#   callers) a chunk's metadata; ${chunk_id} only works with a real chunk
# @return [String] the template with placeholders substituted
def extract_placeholders(str, chunk)
  metadata = if chunk.is_a?(Fluent::Plugin::Buffer::Chunk)
               chunk_passed = true
               chunk.metadata
             else
               chunk_passed = false
               # For existing plugins. Old plugin passes Chunk.metadata instead of Chunk
               chunk
             end
  if metadata.empty?
    # No chunking metadata: only ${chunk_id} can be substituted.
    str.sub(CHUNK_ID_PLACEHOLDER_PATTERN) {
      if chunk_passed
        dump_unique_id_hex(chunk.unique_id)
      else
        log.warn "${chunk_id} is not allowed in this plugin. Pass Chunk instead of metadata in extract_placeholders's 2nd argument"
      end
    }
  else
    rvalue = str.dup
    # strftime formatting
    if @chunk_key_time # this section MUST be earlier than rest to use raw 'str'
      @output_time_formatter_cache[str] ||= Fluent::Timezone.formatter(@timekey_zone, str)
      rvalue = @output_time_formatter_cache[str].call(metadata.timekey)
    end
    # ${tag}, ${tag[0]}, ${tag[1]}, ... , ${tag[-2]}, ${tag[-1]}
    if @chunk_key_tag
      if str.include?('${tag}')
        rvalue = rvalue.gsub('${tag}', metadata.tag)
      end
      if str =~ CHUNK_TAG_PLACEHOLDER_PATTERN
        hash = {}
        tag_parts = metadata.tag.split('.')
        tag_parts.each_with_index do |part, i|
          # Map both positive and negative indices to the same part.
          hash["${tag[#{i}]}"] = part
          hash["${tag[#{i-tag_parts.size}]}"] = part
        end
        rvalue = rvalue.gsub(CHUNK_TAG_PLACEHOLDER_PATTERN, hash)
      end
      if rvalue =~ CHUNK_TAG_PLACEHOLDER_PATTERN
        log.warn "tag placeholder '#{$1}' not replaced. tag:#{metadata.tag}, template:#{str}"
      end
    end
    # ${a_chunk_key}, ...
    if !@chunk_keys.empty? && metadata.variables
      hash = {'${tag}' => '${tag}'} # not to erase this wrongly
      @chunk_keys.each do |key|
        hash["${#{key}}"] = metadata.variables[key.to_sym]
      end
      rvalue = rvalue.gsub(CHUNK_KEY_PLACEHOLDER_PATTERN, hash)
    end
    if rvalue =~ CHUNK_KEY_PLACEHOLDER_PATTERN
      log.warn "chunk key placeholder '#{$1}' not replaced. template:#{str}"
    end
    # ${chunk_id} last, so it cannot be clobbered by other substitutions.
    rvalue.sub(CHUNK_ID_PLACEHOLDER_PATTERN) {
      if chunk_passed
        dump_unique_id_hex(chunk.unique_id)
      else
        log.warn "${chunk_id} is not allowed in this plugin. Pass Chunk instead of metadata in extract_placeholders's 2nd argument"
      end
    }
  end
end
|
772
|
+
|
773
|
+
# Dispatch an event stream to the buffered or synchronous emit path.
def emit_events(tag, es)
  # actually this method will be overwritten by #configure
  @buffering ? emit_buffered(tag, es) : emit_sync(tag, es)
end
|
781
|
+
|
782
|
+
# Non-buffered emit: process the event stream synchronously, keeping
# the emit/record/error counters up to date. Errors are re-raised.
def emit_sync(tag, es)
  @counters_monitor.synchronize { @emit_count += 1 }
  process(tag, es)
  @counters_monitor.synchronize { @emit_records += es.size }
rescue
  @counters_monitor.synchronize { @num_errors += 1 }
  raise
end
|
792
|
+
|
793
|
+
# Buffered emit: chunk the event stream into the buffer (enqueueing
# immediately when flush_mode is :immediate) and trigger one flush when
# chunks are already queued and no retry is in progress.
def emit_buffered(tag, es)
  @counters_monitor.synchronize { @emit_count += 1 }
  execute_chunking(tag, es, enqueue: (@flush_mode == :immediate))
  submit_flush_once if !@retry && @buffer.queued?
rescue
  # TODO: separate number of errors into emit errors and write/flush errors
  @counters_monitor.synchronize { @num_errors += 1 }
  raise
end
|
806
|
+
|
807
|
+
# TODO: optimize this code
|
808
|
+
# Build buffer metadata for one event, according to the configured chunk
# keys: timekey (when 'time' is a chunk key), tag (when 'tag' is), and
# record-derived variables (for any remaining chunk keys).
#
# @param tag [String, nil]
# @param time [Fluent::EventTime, Integer, nil]
# @param record [Hash, nil]
# @return metadata object from @buffer.metadata (or a bare Struct for tests)
def metadata(tag, time, record)
  # this arguments are ordered in output plugin's rule
  # Metadata 's argument order is different from this one (timekey, tag, variables)

  raise ArgumentError, "tag must be a String: #{tag.class}" unless tag.nil? || tag.is_a?(String)
  raise ArgumentError, "time must be a Fluent::EventTime (or Integer): #{time.class}" unless time.nil? || time.is_a?(Fluent::EventTime) || time.is_a?(Integer)
  raise ArgumentError, "record must be a Hash: #{record.class}" unless record.nil? || record.is_a?(Hash)

  if @chunk_keys.nil? && @chunk_key_time.nil? && @chunk_key_tag.nil?
    # for tests
    # NOTE(review): this creates a new anonymous Struct class on every call;
    # presumably acceptable since it is a test-only path — confirm before optimizing.
    return Struct.new(:timekey, :tag, :variables).new
  end

  # timekey is int from epoch, and `timekey - timekey % 60` is assumed to mach with 0s of each minutes.
  # it's wrong if timezone is configured as one which supports leap second, but it's very rare and
  # we can ignore it (especially in production systems).
  if @chunk_keys.empty?
    # Only the special 'time'/'tag' keys (or none) are configured.
    if !@chunk_key_time && !@chunk_key_tag
      @buffer.metadata()
    elsif @chunk_key_time && @chunk_key_tag
      timekey = calculate_timekey(time)
      @buffer.metadata(timekey: timekey, tag: tag)
    elsif @chunk_key_time
      timekey = calculate_timekey(time)
      @buffer.metadata(timekey: timekey)
    else
      @buffer.metadata(tag: tag)
    end
  else
    timekey = if @chunk_key_time
                calculate_timekey(time)
              else
                nil
              end
    # Extract each configured chunk key's value from the record.
    pairs = Hash[@chunk_key_accessors.map { |k, a| [k, a.call(record)] }]
    @buffer.metadata(timekey: timekey, tag: (@chunk_key_tag ? tag : nil), variables: pairs)
  end
end
|
846
|
+
|
847
|
+
# Round an event time down to the start of its timekey range.
# In UTC mode this is plain epoch arithmetic; otherwise the configured
# timezone offset (fixed integer, or a callable for DST-aware zones)
# shifts the boundary so ranges align with local time.
def calculate_timekey(time)
  epoch = time.to_i
  if @timekey_use_utc
    (epoch - (epoch % @timekey)).to_i
  else
    zone_offset = @calculate_offset.nil? ? @offset : @calculate_offset.call(time)
    (epoch - ((epoch + zone_offset) % @timekey)).to_i
  end
end
|
856
|
+
|
857
|
+
# Build an in-memory chunk with metadata for the given event — test helper.
def chunk_for_test(tag, time, record)
  require 'fluent/plugin/buffer/memory_chunk'

  Fluent::Plugin::Buffer::MemoryChunk.new(metadata_for_test(tag, time, record))
end
|
863
|
+
|
864
|
+
# Build metadata for one event without leaving it registered in the
# buffer's metadata list — test helper; only valid on a pristine buffer.
def metadata_for_test(tag, time, record)
  raise "BUG: #metadata_for_test is available only when no actual metadata exists" unless @buffer.metadata_list.empty?
  result = metadata(tag, time, record)
  @buffer.metadata_list_clear!
  result
end
|
870
|
+
|
871
|
+
# Route an event stream to the appropriate chunking strategy:
# simple (no per-record metadata), custom-format, or standard-format.
def execute_chunking(tag, es, enqueue: false)
  return handle_stream_simple(tag, es, enqueue: enqueue) if @simple_chunking
  return handle_stream_with_custom_format(tag, es, enqueue: enqueue) if @custom_format

  handle_stream_with_standard_format(tag, es, enqueue: enqueue)
end
|
880
|
+
|
881
|
+
# Run a buffer-write block, handling BufferOverflowError according to the
# configured overflow_action:
#   :throw_exception  — re-raise to the caller
#   :block            — poll until the buffer is storable, then retry
#   :drop_oldest_chunk — purge the oldest queued chunk, then retry
def write_guard(&block)
  begin
    block.call
  rescue Fluent::Plugin::Buffer::BufferOverflowError
    log.warn "failed to write data into buffer by buffer overflow", action: @buffer_config.overflow_action
    case @buffer_config.overflow_action
    when :throw_exception
      raise
    when :block
      log.debug "buffer.write is now blocking"
      until @buffer.storable?
        if self.stopped?
          log.error "breaking block behavior to shutdown Fluentd"
          # to break infinite loop to exit Fluentd process
          raise
        end
        log.trace "sleeping until buffer can store more data"
        sleep 1
      end
      log.debug "retrying buffer.write after blocked operation"
      retry
    when :drop_oldest_chunk
      begin
        oldest = @buffer.dequeue_chunk
        if oldest
          log.warn "dropping oldest chunk to make space after buffer overflow", chunk_id: dump_unique_id_hex(oldest.unique_id)
          @buffer.purge_chunk(oldest.unique_id)
        else
          log.error "no queued chunks to be dropped for drop_oldest_chunk"
        end
      rescue
        # ignore any errors
      end
      # If dropping did not free enough space, give up and re-raise.
      raise unless @buffer.storable?
      retry
    else
      raise "BUG: unknown overflow_action '#{@buffer_config.overflow_action}'"
    end
  end
end
|
921
|
+
|
922
|
+
# Serializer procs for event streams: msgpack, optionally gzip-compressed,
# optionally forcing integer timestamps (time_int).
FORMAT_MSGPACK_STREAM = ->(e){ e.to_msgpack_stream }
FORMAT_COMPRESSED_MSGPACK_STREAM = ->(e){ e.to_compressed_msgpack_stream }
FORMAT_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_msgpack_stream(time_int: true) }
FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT = ->(e){ e.to_compressed_msgpack_stream(time_int: true) }

# Pick the serializer proc matching the buffer's compression setting
# and the @time_as_integer flag.
def generate_format_proc
  compressed = @buffer && @buffer.compress == :gzip
  if @time_as_integer
    compressed ? FORMAT_COMPRESSED_MSGPACK_STREAM_TIME_INT : FORMAT_MSGPACK_STREAM_TIME_INT
  else
    compressed ? FORMAT_COMPRESSED_MSGPACK_STREAM : FORMAT_MSGPACK_STREAM
  end
end
|
934
|
+
|
935
|
+
# metadata_and_data is a Hash of:
#  (standard format) metadata => event stream
#  (custom format) metadata => array of formatted event
# For standard format, formatting should be done for whole event stream, but
# "whole event stream" may be a split of "es" here when it's bigger than chunk_limit_size.
# `@buffer.write` will do this splitting.
# For custom format, formatting will be done here. Custom formatting always requires
# iteration of event stream, and it should be done just once even if total event stream size
# is bigger than chunk_limit_size because of performance.
def handle_stream_with_custom_format(tag, es, enqueue: false)
  meta_and_data = {}
  records = 0
  es.each do |time, record|
    meta = metadata(tag, time, record)
    # The bucket is created even when #format drops the record, preserving
    # the metadata key in the hash passed to the buffer.
    bucket = (meta_and_data[meta] ||= [])
    formatted = format(tag, time, record)
    if formatted
      bucket << formatted
      records += 1
    end
  end
  write_guard do
    @buffer.write(meta_and_data, enqueue: enqueue)
  end
  @counters_monitor.synchronize { @emit_records += records }
  true
end
|
962
|
+
|
963
|
+
# Group an event stream by metadata into MultiEventStreams and write them
# to the buffer; serialization is delegated to the buffer via format_proc.
def handle_stream_with_standard_format(tag, es, enqueue: false)
  format_proc = generate_format_proc
  meta_and_data = {}
  records = 0
  es.each do |time, record|
    meta = metadata(tag, time, record)
    stream = (meta_and_data[meta] ||= MultiEventStream.new)
    stream.add(time, record)
    records += 1
  end
  write_guard do
    @buffer.write(meta_and_data, format: format_proc, enqueue: enqueue)
  end
  @counters_monitor.synchronize { @emit_records += records }
  true
end
|
979
|
+
|
980
|
+
# Chunking path used when no per-record metadata is needed (no time key and
# no record chunk keys): the whole stream shares one metadata object.
def handle_stream_simple(tag, es, enqueue: false)
  format_proc = nil
  meta = metadata(@chunk_key_tag ? tag : nil, nil, nil)
  if @custom_format
    # Format each record here; records dropped by #format are not counted.
    records = 0
    data = []
    es.each do |time, record|
      formatted = format(tag, time, record)
      if formatted
        data << formatted
        records += 1
      end
    end
  else
    # Let the buffer serialize the raw event stream.
    format_proc = generate_format_proc
    data = es
    records = es.size
  end
  write_guard do
    @buffer.write({meta => data}, format: format_proc, enqueue: enqueue)
  end
  @counters_monitor.synchronize { @emit_records += records }
  true
end
|
1004
|
+
|
1005
|
+
# Mark a chunk as successfully written: remove its delayed-commit tracking
# entry (when applicable), purge it from the buffer, and clear the retry
# state since a successful flush ends the retry sequence.
#
# @param chunk_id chunk identifier
# @param delayed [Boolean] whether this commit completes a delayed write
# @param secondary [Boolean] whether the flush succeeded via the secondary
def commit_write(chunk_id, delayed: @delayed_commit, secondary: false)
  log.on_trace { log.trace "committing write operation to a chunk", chunk: dump_unique_id_hex(chunk_id), delayed: delayed }

  if delayed
    @dequeued_chunks_mutex.synchronize do
      @dequeued_chunks.delete_if{ |info| info.chunk_id == chunk_id }
    end
  end
  @buffer.purge_chunk(chunk_id)

  @retry_mutex.synchronize do
    if @retry # success to flush chunks in retries
      if secondary
        log.warn "retry succeeded by secondary.", chunk_id: dump_unique_id_hex(chunk_id)
      else
        log.warn "retry succeeded.", chunk_id: dump_unique_id_hex(chunk_id)
      end
      @retry = nil
    end
  end
end
|
1026
|
+
|
1027
|
+
# update_retry parameter is for preventing busy loop by async write
|
1028
|
+
# We will remove this parameter by re-design retry_state management between threads.
|
1029
|
+
# Roll back a previously dequeued chunk so it will be flushed again.
#
# @param chunk_id chunk identifier
# @param update_retry [Boolean] when true, update the (primary's) retry
#   state; callers doing async writes pass false to avoid busy-looping
# @return [Boolean] true when the chunk was taken back; false when it was
#   already flushed/purged and could not be rolled back
def rollback_write(chunk_id, update_retry: true)
  # This API is to rollback chunks explicitly from plugins.
  # 3rd party plugins can depend it on automatic rollback of #try_rollback_write
  @dequeued_chunks_mutex.synchronize do
    @dequeued_chunks.delete_if{ |info| info.chunk_id == chunk_id }
  end
  # returns true if chunk was rollbacked as expected
  #         false if chunk was already flushed and couldn't be rollbacked unexpectedly
  # in many cases, false can be just ignored
  if @buffer.takeback_chunk(chunk_id)
    @counters_monitor.synchronize{ @rollback_count += 1 }
    if update_retry
      # Secondary instances route retry bookkeeping through the primary.
      primary = @as_secondary ? @primary_instance : self
      primary.update_retry_state(chunk_id, @as_secondary)
    end
    true
  else
    false
  end
end
|
1049
|
+
|
1050
|
+
# Roll back delayed-commit chunks whose commit timeout has expired.
# Expired entries are processed from the head of @dequeued_chunks (which
# holds them in dequeue order), taken back into the buffer, and the retry
# state is updated via the primary instance.
def try_rollback_write
  @dequeued_chunks_mutex.synchronize do
    while @dequeued_chunks.first && @dequeued_chunks.first.expired?
      info = @dequeued_chunks.shift
      if @buffer.takeback_chunk(info.chunk_id)
        @counters_monitor.synchronize{ @rollback_count += 1 }
        log.warn "failed to flush the buffer chunk, timeout to commit.", chunk_id: dump_unique_id_hex(info.chunk_id), flushed_at: info.time
        primary = @as_secondary ? @primary_instance : self
        primary.update_retry_state(info.chunk_id, @as_secondary)
      end
    end
  end
end
|
1063
|
+
|
1064
|
+
# Rolls back every dequeued (not-yet-committed) chunk regardless of expiry.
# Used at shutdown to cancel pending delayed commits so their chunks are
# returned to the buffer instead of being lost.
def try_rollback_all
  # may be called before the dequeued-chunk tracking is initialized
  return unless @dequeued_chunks
  @dequeued_chunks_mutex.synchronize do
    until @dequeued_chunks.empty?
      info = @dequeued_chunks.shift
      if @buffer.takeback_chunk(info.chunk_id)
        @counters_monitor.synchronize{ @rollback_count += 1 }
        log.info "delayed commit for buffer chunks was cancelled in shutdown", chunk_id: dump_unique_id_hex(info.chunk_id)
        # when running as a secondary output, the retry state lives on the primary
        primary = @as_secondary ? @primary_instance : self
        primary.update_retry_state(info.chunk_id, @as_secondary)
      end
    end
  end
end
|
1078
|
+
|
1079
|
+
# Computes when a flush thread should try flushing next.
# With nothing queued it waits a regular flush_thread_interval; with queued
# chunks it honors the retry schedule if one is active, otherwise flushes
# again after the shorter burst interval.
def next_flush_time
  unless @buffer.queued?
    return Time.now + @buffer_config.flush_thread_interval
  end

  @retry_mutex.synchronize do
    if @retry
      @retry.next_time
    else
      Time.now + @buffer_config.flush_thread_burst_interval
    end
  end
end
|
1088
|
+
|
1089
|
+
# Error classes for which retrying a flush is pointless (programming errors
# or corrupt data); such chunks are routed to the secondary output or backed
# up instead of being retried. Frozen to prevent accidental mutation of a
# shared constant.
UNRECOVERABLE_ERRORS = [Fluent::UnrecoverableError, TypeError, ArgumentError, NoMethodError, MessagePack::UnpackError].freeze
|
1090
|
+
|
1091
|
+
# Dequeues one chunk from the buffer and tries to flush it.
#
# Uses the secondary output when the retry state says retries on the primary
# have passed the secondary threshold. For delayed-commit outputs the chunk
# is registered in @dequeued_chunks and #try_write is called (commit happens
# later via #commit_write); otherwise #write is called and the chunk is
# committed synchronously.
#
# Unrecoverable errors are routed to the secondary output or to on-disk
# backup via #backup_chunk. Any other error takes the chunk back into the
# buffer and updates the retry state.
#
# Fix: the fallback path that writes the chunk to the secondary used to call
# commit_write(chunk_id, ...), but chunk_id is only assigned in the
# non-delayed branch above — when the primary uses delayed commit this
# raised NameError. Use chunk.unique_id, which is always available.
def try_flush
  chunk = @buffer.dequeue_chunk
  return unless chunk

  log.on_trace { log.trace "trying flush for a chunk", chunk: dump_unique_id_hex(chunk.unique_id) }

  output = self
  using_secondary = false
  if @retry_mutex.synchronize{ @retry && @retry.secondary? }
    output = @secondary
    using_secondary = true
  end

  if @enable_msgpack_streamer
    chunk.extend ChunkMessagePackEventStreamer
  end

  begin
    chunk_write_start = Fluent::Clock.now

    if output.delayed_commit
      log.trace "executing delayed write and commit", chunk: dump_unique_id_hex(chunk.unique_id)
      @counters_monitor.synchronize{ @write_count += 1 }
      @dequeued_chunks_mutex.synchronize do
        # delayed_commit_timeout for secondary is configured in <buffer> of primary (<secondary> don't get <buffer>)
        @dequeued_chunks << DequeuedChunkInfo.new(chunk.unique_id, Time.now, self.delayed_commit_timeout)
      end

      output.try_write(chunk)
      check_slow_flush(chunk_write_start)
    else # output plugin without delayed purge
      chunk_id = chunk.unique_id
      dump_chunk_id = dump_unique_id_hex(chunk_id)
      log.trace "adding write count", instance: self.object_id
      @counters_monitor.synchronize{ @write_count += 1 }
      log.trace "executing sync write", chunk: dump_chunk_id

      output.write(chunk)
      check_slow_flush(chunk_write_start)

      log.trace "write operation done, committing", chunk: dump_chunk_id
      commit_write(chunk_id, delayed: false, secondary: using_secondary)
      log.trace "done to commit a chunk", chunk: dump_chunk_id
    end
  rescue *UNRECOVERABLE_ERRORS => e
    if @secondary
      if using_secondary
        log.warn "got unrecoverable error in secondary.", error: e
        log.warn_backtrace
        backup_chunk(chunk, using_secondary, output.delayed_commit)
      else
        if (self.class == @secondary.class)
          log.warn "got unrecoverable error in primary and secondary type is same as primary. Skip secondary", error: e
          log.warn_backtrace
          backup_chunk(chunk, using_secondary, output.delayed_commit)
        else
          # Call secondary output directly without retry update.
          # In this case, delayed commit causes inconsistent state in dequeued chunks so async output in secondary is not allowed for now.
          if @secondary.delayed_commit
            log.warn "got unrecoverable error in primary and secondary is async output. Skip secondary for backup", error: e
            log.warn_backtrace
            backup_chunk(chunk, using_secondary, output.delayed_commit)
          else
            log.warn "got unrecoverable error in primary. Skip retry and flush chunk to secondary", error: e
            log.warn_backtrace
            begin
              @secondary.write(chunk)
              # BUGFIX: was commit_write(chunk_id, ...) — chunk_id is undefined
              # when the primary output uses delayed commit (see method doc)
              commit_write(chunk.unique_id, delayed: output.delayed_commit, secondary: true)
            rescue => e
              log.warn "got an error in secondary for unrecoverable error", error: e
              log.warn_backtrace
              backup_chunk(chunk, using_secondary, output.delayed_commit)
            end
          end
        end
      end
    else
      log.warn "got unrecoverable error in primary and no secondary", error: e
      log.warn_backtrace
      backup_chunk(chunk, using_secondary, output.delayed_commit)
    end
  rescue => e
    log.debug "taking back chunk for errors.", chunk: dump_unique_id_hex(chunk.unique_id)
    if output.delayed_commit
      @dequeued_chunks_mutex.synchronize do
        @dequeued_chunks.delete_if{|d| d.chunk_id == chunk.unique_id }
      end
    end

    if @buffer.takeback_chunk(chunk.unique_id)
      @counters_monitor.synchronize { @rollback_count += 1 }
    end

    update_retry_state(chunk.unique_id, using_secondary, e)

    raise if @under_plugin_development && !@retry_for_error_chunk
  end
end
|
1189
|
+
|
1190
|
+
# Persists a chunk that failed with an unrecoverable error to a backup file
# on disk, then commits it so it is removed from the buffer.
#
# chunk           - the bad chunk
# using_secondary - whether the failing flush used the secondary output
# delayed_commit  - whether the failing output used delayed commit
#                   (both are forwarded to #commit_write)
#
# When disable_chunk_backup is configured, the chunk is dropped with a
# warning instead of being written out.
def backup_chunk(chunk, using_secondary, delayed_commit)
  if @buffer_config.disable_chunk_backup
    log.warn "disable_chunk_backup is true. #{dump_unique_id_hex(chunk.unique_id)} chunk is thrown away"
  else
    unique_id = dump_unique_id_hex(chunk.unique_id)
    # replace characters that are unsafe in file paths on common filesystems
    safe_plugin_id = plugin_id.gsub(/[ "\/\\:;|*<>?]/, '_')
    backup_base_dir = system_config.root_dir || DEFAULT_BACKUP_DIR
    backup_file = File.join(backup_base_dir, 'backup', "worker#{fluentd_worker_id}", safe_plugin_id, "#{unique_id}.log")
    backup_dir = File.dirname(backup_file)

    log.warn "bad chunk is moved to #{backup_file}"
    FileUtils.mkdir_p(backup_dir) unless Dir.exist?(backup_dir)
    # append mode: a pre-existing file with the same unique id is extended, not truncated
    File.open(backup_file, 'ab', system_config.file_permission || 0644) { |f|
      chunk.write_to(f)
    }
  end
  # commit so the chunk is purged from the buffer and any retry state is cleared
  commit_write(chunk.unique_id, secondary: using_secondary, delayed: delayed_commit)
end
|
1208
|
+
|
1209
|
+
# Accounts the duration of a finished flush and warns when it exceeds
# slow_flush_log_threshold.
#
# start - Fluent::Clock timestamp taken just before the write began
def check_slow_flush(start)
  took = Fluent::Clock.now - start
  took_millis = (took * 1000).to_i
  @counters_monitor.synchronize { @flush_time_count += took_millis }
  return unless took > @slow_flush_log_threshold

  @counters_monitor.synchronize { @slow_flush_count += 1 }
  log.warn "buffer flush took longer time than slow_flush_log_threshold:",
           elapsed_time: took, slow_flush_log_threshold: @slow_flush_log_threshold, plugin_id: self.plugin_id
end
|
1219
|
+
|
1220
|
+
# Records a flush failure and advances the retry state machine.
#
# chunk_id        - binary unique id of the chunk that failed to flush
# using_secondary - whether the failure happened on the secondary output
#                   (affects only the warning message)
# error           - the exception that caused the failure, or nil when the
#                   rollback was not error-driven (e.g. commit timeout);
#                   with nil no warning is logged
#
# Creates a new retry state on the first failure; on subsequent failures
# either steps the backoff, or — when the retry limit is hit — drops the
# whole buffer queue and clears the retry state.
def update_retry_state(chunk_id, using_secondary, error = nil)
  @retry_mutex.synchronize do
    @counters_monitor.synchronize{ @num_errors += 1 }
    chunk_id_hex = dump_unique_id_hex(chunk_id)

    unless @retry
      # first failure: start a retry sequence
      @retry = retry_state(@buffer_config.retry_randomize)
      if error
        log.warn "failed to flush the buffer.", retry_time: @retry.steps, next_retry_seconds: @retry.next_time, chunk: chunk_id_hex, error: error
        log.warn_backtrace error.backtrace
      end
      return
    end

    # @retry exists

    if @retry.limit?
      # retries exhausted: drop everything queued and reset
      if error
        records = @buffer.queued_records
        msg = "failed to flush the buffer, and hit limit for retries. dropping all chunks in the buffer queue."
        log.error msg, retry_times: @retry.steps, records: records, error: error
        log.error_backtrace error.backtrace
      end
      @buffer.clear_queue!
      log.debug "buffer queue cleared"
      @retry = nil
    else
      # advance the backoff schedule
      @retry.step
      if error
        if using_secondary
          msg = "failed to flush the buffer with secondary output."
          log.warn msg, retry_time: @retry.steps, next_retry_seconds: @retry.next_time, chunk: chunk_id_hex, error: error
          log.warn_backtrace error.backtrace
        else
          msg = "failed to flush the buffer."
          log.warn msg, retry_time: @retry.steps, next_retry_seconds: @retry.next_time, chunk: chunk_id_hex, error: error
          log.warn_backtrace error.backtrace
        end
      end
    end
  end
end
|
1262
|
+
|
1263
|
+
# Builds a fresh retry-state object from the buffer configuration.
# When a secondary output exists, the state also carries the secondary
# threshold so retries can switch over to it.
#
# randomize - whether retry intervals should be jittered
def retry_state(randomize)
  options = {
    forever: @buffer_config.retry_forever,
    max_steps: @buffer_config.retry_max_times,
    backoff_base: @buffer_config.retry_exponential_backoff_base,
    max_interval: @buffer_config.retry_max_interval,
    randomize: randomize
  }
  if @secondary
    options[:secondary] = true
    options[:secondary_threshold] = @buffer_config.retry_secondary_threshold
  end
  retry_state_create(
    :output_retries, @buffer_config.retry_type, @buffer_config.retry_wait, @buffer_config.retry_timeout,
    **options
  )
end
|
1281
|
+
|
1282
|
+
# Wakes up one flush thread (round-robin) to run a flush immediately.
def submit_flush_once
  # Without locks: it is rough but enough to select "next" writer selection
  next_pos = (@output_flush_thread_current_position + 1) % @buffer_config.flush_thread_count
  @output_flush_thread_current_position = next_pos
  state = @output_flush_threads[next_pos]
  state.mutex.synchronize do
    # Thread#status is "run"/"sleep"/"aborting" while alive,
    # false (stopped normally) or nil (killed by exception) when dead
    if state.thread.nil? || !state.thread.status
      log.warn "thread is already dead"
    else
      state.next_clock = 0
      state.cond_var.signal
    end
  end
  Thread.pass
end
|
1296
|
+
|
1297
|
+
# Forces all staged chunks into the queue and flushes the whole queue.
# No-op for non-buffered operation.
def force_flush
  return unless @buffering
  @buffer.enqueue_all(true)
  submit_flush_all
end
|
1303
|
+
|
1304
|
+
# Keeps kicking flush threads until the queue drains, pausing a burst
# interval between kicks; stops early once a retry state appears.
def submit_flush_all
  until @retry || !@buffer.queued?
    submit_flush_once
    sleep @buffer_config.flush_thread_burst_interval
  end
end
|
1310
|
+
|
1311
|
+
# only for tests of output plugin
# Sets the interruption flag so the enqueue/flush threads skip their work
# until #enqueue_thread_wait clears it.
def interrupt_flushes
  @output_flush_interrupted = true
end
|
1315
|
+
|
1316
|
+
# only for tests of output plugin
# Clears the interruption flag and blocks (up to 10 seconds) until the
# enqueue thread completes one pass: the thread resets
# @output_enqueue_thread_waiting at the end of each iteration.
def enqueue_thread_wait
  @output_enqueue_thread_mutex.synchronize do
    @output_flush_interrupted = false
    @output_enqueue_thread_waiting = true
  end
  require 'timeout'
  Timeout.timeout(10) do
    # busy-wait with Thread.pass so the enqueue thread gets scheduled
    Thread.pass while @output_enqueue_thread_waiting
  end
end
|
1327
|
+
|
1328
|
+
# only for tests of output plugin
# Wakes every live flush thread so pending flushes run immediately.
def flush_thread_wakeup
  @output_flush_threads.each do |state|
    state.mutex.synchronize do
      # skip dead threads: status is false/nil once a thread has finished
      next unless state.thread && state.thread.status
      state.next_clock = 0
      state.cond_var.signal
    end
    Thread.pass
  end
end
|
1340
|
+
|
1341
|
+
# Main loop of the enqueue thread: periodically moves staged chunks into
# the flush queue based on flush_interval and/or timekey expiry.
# The polling interval is derived from the shortest relevant configured
# interval (divided by 11 to sample it comfortably often), floored at
# flush_thread_interval.
def enqueue_thread_run
  value_for_interval = nil
  if @flush_mode == :interval
    value_for_interval = @buffer_config.flush_interval
  end
  if @chunk_key_time
    if !value_for_interval || @buffer_config.timekey < value_for_interval
      value_for_interval = [@buffer_config.timekey, @buffer_config.timekey_wait].min
    end
  end
  unless value_for_interval
    raise "BUG: both of flush_interval and timekey are disabled"
  end
  # poll noticeably more often than the shortest interval to keep timing tight
  interval = value_for_interval / 11.0
  if interval < @buffer_config.flush_thread_interval
    interval = @buffer_config.flush_thread_interval
  end

  # wait for the plugin lifecycle to reach the started state
  while !self.after_started? && !self.stopped?
    sleep 0.5
  end
  log.debug "enqueue_thread actually running"

  begin
    while @output_enqueue_thread_running
      now_int = Time.now.to_i
      if @output_flush_interrupted
        # tests interrupt enqueueing via #interrupt_flushes
        sleep interval
        next
      end

      @output_enqueue_thread_mutex.lock
      begin
        if @flush_mode == :interval
          flush_interval = @buffer_config.flush_interval.to_i
          # This block should be done by integer values.
          # If both of flush_interval & flush_thread_interval are 1s, expected actual flush timing is 1.5s.
          # If we use integered values for this comparison, expected actual flush timing is 1.0s.
          @buffer.enqueue_all{ |metadata, chunk| chunk.created_at.to_i + flush_interval <= now_int }
        end

        if @chunk_key_time
          timekey_unit = @buffer_config.timekey
          timekey_wait = @buffer_config.timekey_wait
          current_timekey = now_int - now_int % timekey_unit
          # enqueue chunks whose timekey window has closed and whose grace period (timekey_wait) has elapsed
          @buffer.enqueue_all{ |metadata, chunk| metadata.timekey < current_timekey && metadata.timekey + timekey_unit + timekey_wait <= now_int }
        end
      rescue => e
        raise if @under_plugin_development
        log.error "unexpected error while checking flushed chunks. ignored.", error: e
        log.error_backtrace
      ensure
        # signal #enqueue_thread_wait that one pass completed
        @output_enqueue_thread_waiting = false
        @output_enqueue_thread_mutex.unlock
      end
      sleep interval
    end
  rescue => e
    # normal errors are rescued by inner begin-rescue clause.
    log.error "error on enqueue thread", error: e
    log.error_backtrace
    raise
  end
end
|
1405
|
+
|
1406
|
+
# Main loop of one flush thread.
#
# state - per-thread FlushThreadState carrying mutex, cond_var, next_clock
#
# Each iteration either sleeps until the next scheduled clock / retry time,
# or (with the state mutex released) performs a flush via #try_flush and
# rolls back expired delayed-commit chunks via #try_rollback_write.
# The state mutex is held while deciding/sleeping and released around
# actual work so #submit_flush_once / #flush_thread_wakeup can signal it.
def flush_thread_run(state)
  flush_thread_interval = @buffer_config.flush_thread_interval

  state.next_clock = Fluent::Clock.now + flush_thread_interval

  # wait for the plugin lifecycle to reach the started state
  while !self.after_started? && !self.stopped?
    sleep 0.5
  end
  log.debug "flush_thread actually running"

  state.mutex.lock
  begin
    # This thread don't use `thread_current_running?` because this thread should run in `before_shutdown` phase
    while @output_flush_threads_running
      current_clock = Fluent::Clock.now
      next_retry_time = nil

      @retry_mutex.synchronize do
        next_retry_time = @retry ? @retry.next_time : nil
      end

      if state.next_clock > current_clock
        # not yet time for the next scheduled flush
        interval = state.next_clock - current_clock
      elsif next_retry_time && next_retry_time > Time.now
        # a retry is pending; wait until its scheduled moment
        interval = next_retry_time.to_f - Time.now.to_f
      else
        # time to work: release the state lock while flushing
        state.mutex.unlock
        begin
          try_flush
          # next_flush_time uses flush_thread_interval or flush_thread_burst_interval (or retrying)
          interval = next_flush_time.to_f - Time.now.to_f
          # TODO: if secondary && delayed-commit, next_flush_time will be much longer than expected
          #       because @retry still exists (#commit_write is not called yet in #try_flush)
          #       @retry should be cleared if delayed commit is enabled? Or any other solution?
          state.next_clock = Fluent::Clock.now + interval
        ensure
          state.mutex.lock
        end
      end

      if @dequeued_chunks_mutex.synchronize{ !@dequeued_chunks.empty? && @dequeued_chunks.first.expired? }
        unless @output_flush_interrupted
          state.mutex.unlock
          begin
            try_rollback_write
          ensure
            state.mutex.lock
          end
        end
      end

      # sleep until woken by cond_var.signal or the interval elapses
      state.cond_var.wait(state.mutex, interval) if interval > 0
    end
  rescue => e
    # normal errors are rescued by output plugins in #try_flush
    # so this rescue section is for critical & unrecoverable errors
    log.error "error on output thread", error: e
    log.error_backtrace
    raise
  ensure
    state.mutex.unlock
  end
end
|
1469
|
+
|
1470
|
+
# Maps each buffer stat key to its "buffer_"-prefixed name used in the
# output of #statistics. Built functionally and frozen: the original
# constructed the hash by mutating the constant with #each, leaving it
# mutable at runtime.
BUFFER_STATS_KEYS = Fluent::Plugin::Buffer::STATS_KEYS.map { |key|
  [key, "buffer_#{key}"]
}.to_h.freeze
|
1474
|
+
|
1475
|
+
# Collects counters for monitoring, merged with the buffer's own statistics
# (each buffer key prefixed via BUFFER_STATS_KEYS).
# Returns a hash of the form { 'output' => { ... } }.
def statistics
  stats = {
    'emit_records' => @emit_records,
    # Respect original name
    # https://github.com/fluent/fluentd/blob/45c7b75ba77763eaf87136864d4942c4e0c5bfcd/lib/fluent/plugin/in_monitor_agent.rb#L284
    'retry_count' => @num_errors,
    'emit_count' => @emit_count,
    'write_count' => @write_count,
    'rollback_count' => @rollback_count,
    'slow_flush_count' => @slow_flush_count,
    'flush_time_count' => @flush_time_count,
  }

  buffer_stats =
    if @buffer && @buffer.respond_to?(:statistics)
      @buffer.statistics['buffer'] || {}
    else
      {}
    end
  buffer_stats.each do |k, v|
    stats[BUFFER_STATS_KEYS[k]] = v
  end

  { 'output' => stats }
end
|
1496
|
+
end
|
1497
|
+
end
|
1498
|
+
end
|