logstash-output-jdbc 0.2.10 → 5.4.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +64 -0
- data/Gemfile +11 -0
- data/README.md +54 -30
- data/THANKS.md +18 -0
- data/lib/com/zaxxer/HikariCP/2.4.7/HikariCP-2.4.7.jar +0 -0
- data/lib/com/zaxxer/HikariCP/2.7.2/HikariCP-2.7.2.jar +0 -0
- data/lib/logstash-output-jdbc_jars.rb +2 -2
- data/lib/logstash/outputs/jdbc.rb +163 -163
- data/lib/org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar +0 -0
- data/lib/org/apache/logging/log4j/log4j-api/2.9.1/log4j-api-2.9.1.jar +0 -0
- data/lib/org/apache/logging/log4j/log4j-core/2.9.1/log4j-core-2.9.1.jar +0 -0
- data/lib/org/apache/logging/log4j/log4j-slf4j-impl/2.6.2/log4j-slf4j-impl-2.6.2.jar +0 -0
- data/lib/org/apache/logging/log4j/log4j-slf4j-impl/2.9.1/log4j-slf4j-impl-2.9.1.jar +0 -0
- data/lib/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar +0 -0
- data/lib/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar +0 -0
- data/logstash-output-jdbc.gemspec +32 -0
- data/spec/jdbc_spec_helper.rb +216 -0
- data/spec/outputs/jdbc_derby_spec.rb +33 -0
- data/spec/outputs/jdbc_mysql_spec.rb +24 -0
- data/spec/outputs/jdbc_postgres_spec.rb +41 -0
- data/spec/outputs/jdbc_spec.rb +4 -87
- data/spec/outputs/jdbc_sqlite_spec.rb +26 -0
- data/vendor/jar-dependencies/runtime-jars/HikariCP-2.7.2.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/log4j-api-2.6.2.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/log4j-slf4j-impl-2.6.2.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.25.jar +0 -0
- metadata +94 -53
- data/LICENSE.txt +0 -21
- data/lib/com/zaxxer/HikariCP/2.4.2/HikariCP-2.4.2.jar +0 -0
- data/lib/com/zaxxer/HikariCP/2.4.6/HikariCP-2.4.6.jar +0 -0
- data/lib/logstash-output-jdbc_ring-buffer.rb +0 -17
- data/lib/org/slf4j/slf4j-api/1.7.12/slf4j-api-1.7.12.jar +0 -0
- data/lib/org/slf4j/slf4j-api/1.7.16/slf4j-api-1.7.16.jar +0 -0
- data/lib/org/slf4j/slf4j-log4j12/1.7.13/slf4j-log4j12-1.7.13.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/HikariCP-2.4.2.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/HikariCP-2.4.6.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/log4j-1.2.17.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.13.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.16.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/slf4j-log4j12-1.7.21.jar +0 -0
- data/vendor/jar-dependencies/runtime-jars/slf4j-nop-1.7.13.jar +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 2c9b504aa52e7be8ac8e76a75e925332a1978bdc
|
4
|
+
data.tar.gz: 6acb7adc74e8315ec06981677cc58e7cf9b25009
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: cad42066d35d1b1f2457a7aa5a12c5e28471786830a4ec97031faccbddcda2bd1409a61dd7f9e9ffa959a1bf403601ddf3722bf64eec82dcf8263c9027cfa011
|
7
|
+
data.tar.gz: d0cea89d20fef7ccb53fd826bde6e7edd26b523aaa8d484d509171f7d1b7938b2e11b95364d76e2df0fdd3be26390f9280e91dd669755e84d4980f636d831b42
|
data/CHANGELOG.md
ADDED
@@ -0,0 +1,64 @@
|
|
1
|
+
# Change Log
|
2
|
+
All notable changes to this project will be documented in this file, from 0.2.0.
|
3
|
+
|
4
|
+
## [5.3.0] - 2017-11-08
|
5
|
+
- Adds configuration options `enable_event_as_json_keyword` and `event_as_json_keyword`
|
6
|
+
- Adds BigDecimal support
|
7
|
+
- Adds additional logging for debugging purposes (with thanks to @mlkmhd's work)
|
8
|
+
|
9
|
+
## [5.2.1] - 2017-04-09
|
10
|
+
- Adds Array and Hash to_json support for non-sprintf syntax
|
11
|
+
|
12
|
+
## [5.2.0] - 2017-04-01
|
13
|
+
- Upgrades HikariCP to latest
|
14
|
+
- Fixes HikariCP logging integration issues
|
15
|
+
|
16
|
+
## [5.1.0] - 2016-12-17
|
17
|
+
- phoenix-thin fixes for issue #60
|
18
|
+
|
19
|
+
## [5.0.0] - 2016-11-03
|
20
|
+
- logstash v5 support
|
21
|
+
|
22
|
+
## [0.3.1] - 2016-08-28
|
23
|
+
- Adds connection_test configuration option, to prevent the connection test from occurring, allowing the error to be suppressed.
|
24
|
+
Useful for cockroachdb deployments. https://github.com/theangryangel/logstash-output-jdbc/issues/53
|
25
|
+
|
26
|
+
## [0.3.0] - 2016-07-24
|
27
|
+
- Brings tests from v5 branch, providing greater coverage
|
28
|
+
- Removes bulk update support, due to inconsistent behaviour
|
29
|
+
- Plugin now marked as threadsafe, meaning only 1 instance per-Logstash
|
30
|
+
- Raises default max_pool_size to match the default number of workers (1 connection per worker)
|
31
|
+
|
32
|
+
## [0.2.10] - 2016-07-07
|
33
|
+
- Support non-string entries in statement array
|
34
|
+
- Adds backtrace to exception logging
|
35
|
+
|
36
|
+
## [0.2.9] - 2016-06-29
|
37
|
+
- Fix NameError exception.
|
38
|
+
- Moved log_jdbc_exception calls
|
39
|
+
|
40
|
+
## [0.2.7] - 2016-05-29
|
41
|
+
- Backport retry exception logic from v5 branch
|
42
|
+
- Backport improved timestamp compatibility from v5 branch
|
43
|
+
|
44
|
+
## [0.2.6] - 2016-05-02
|
45
|
+
- Fix for exception infinite loop
|
46
|
+
|
47
|
+
## [0.2.5] - 2016-04-11
|
48
|
+
### Added
|
49
|
+
- Basic tests running against DerbyDB
|
50
|
+
- Fix for converting Logstash::Timestamp to iso8601 from @hordijk
|
51
|
+
|
52
|
+
## [0.2.4] - 2016-04-07
|
53
|
+
- Documentation fixes from @hordijk
|
54
|
+
|
55
|
+
## [0.2.3] - 2016-02-16
|
56
|
+
- Bug fixes
|
57
|
+
|
58
|
+
## [0.2.2] - 2015-12-30
|
59
|
+
- Bug fixes
|
60
|
+
|
61
|
+
## [0.2.1] - 2015-12-22
|
62
|
+
- Support for connection pooling support added through HikariCP
|
63
|
+
- Support for unsafe statement handling (allowing dynamic queries)
|
64
|
+
- Altered exception handling to now count sequential flushes with exceptions thrown
|
data/Gemfile
ADDED
@@ -0,0 +1,11 @@
|
|
1
|
+
source 'https://rubygems.org'
|
2
|
+
|
3
|
+
gemspec
|
4
|
+
|
5
|
+
logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
|
6
|
+
use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
|
7
|
+
|
8
|
+
if Dir.exist?(logstash_path) && use_logstash_source
|
9
|
+
gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
|
10
|
+
gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
|
11
|
+
end
|
data/README.md
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
# logstash-output-jdbc
|
2
2
|
|
3
|
-
[![Build Status](https://travis-ci.org/theangryangel/logstash-output-jdbc.svg?branch=master)](https://travis-ci.org/theangryangel/logstash-output-jdbc)
|
3
|
+
[![Build Status](https://travis-ci.org/theangryangel/logstash-output-jdbc.svg?branch=master)](https://travis-ci.org/theangryangel/logstash-output-jdbc) [![Flattr this git repo](http://api.flattr.com/button/flattr-badge-large.png)](https://flattr.com/submit/auto?user_id=the_angry_angel&url=https://github.com/the_angry_angel/logstash-output-jdbc&title=logstash-output-jdbc&language=&tags=github&category=software)
|
4
4
|
|
5
5
|
This plugin is provided as an external plugin and is not part of the Logstash project.
|
6
6
|
|
@@ -14,56 +14,80 @@ If you do find this works for a JDBC driver without an example, let me know and
|
|
14
14
|
This plugin does not bundle any JDBC jar files, and does expect them to be in a
|
15
15
|
particular location. Please ensure you read the 4 installation lines below.
|
16
16
|
|
17
|
-
##
|
17
|
+
## Support & release schedule
|
18
|
+
I no longer have time at work to maintain this plugin in step with Logstash's releases, and I am not completely immersed in the Logstash ecosystem. If something is broken for you I will do my best to help, but I cannot guarantee timeframes.
|
19
|
+
|
20
|
+
Pull requests are always welcome.
|
21
|
+
|
22
|
+
## Changelog
|
18
23
|
See CHANGELOG.md
|
19
24
|
|
20
25
|
## Versions
|
21
26
|
Released versions are available via rubygems, and typically tagged.
|
22
27
|
|
23
28
|
For development:
|
24
|
-
- See master branch for logstash v5
|
29
|
+
- See master branch for logstash v5 & v6 :warning: This is untested under Logstash 6.3 at this time, and there has been 1 unverified report of an issue. Please use at your own risk until I can find the time to evaluate and test 6.3.
|
25
30
|
- See v2.x branch for logstash v2
|
26
31
|
- See v1.5 branch for logstash v1.5
|
27
32
|
- See v1.4 branch for logstash 1.4
|
28
33
|
|
29
34
|
## Installation
|
30
|
-
- Run `bin/plugin install logstash-output-jdbc` in your logstash installation directory
|
35
|
+
- Run `bin/logstash-plugin install logstash-output-jdbc` in your logstash installation directory
|
31
36
|
- Now either:
|
32
37
|
- Use driver_jar_path in your configuration to specify a path to your jar file
|
33
38
|
- Or:
|
34
39
|
- Create the directory vendor/jar/jdbc in your logstash installation (`mkdir -p vendor/jar/jdbc/`)
|
35
40
|
- Add JDBC jar files to vendor/jar/jdbc in your logstash installation
|
36
|
-
- And then configure (examples
|
37
|
-
|
38
|
-
## Running tests
|
39
|
-
At this time tests only run against Derby, in an in-memory database.
|
40
|
-
Acceptance tests for individual database engines will be added over time.
|
41
|
-
|
42
|
-
Assuming valid jruby is installed
|
43
|
-
- First time, issue `jruby -S bundle install` to install dependencies
|
44
|
-
- Next, download Derby jar from https://db.apache.org/derby/
|
45
|
-
- Run the tests `JDBC_DERBY_JAR=path/to/derby.jar jruby -S rspec`
|
46
|
-
- Optionally add the `JDBC_DEBUG=1` env variable to add logging to stdout
|
41
|
+
- And then configure (examples can be found in the examples directory)
|
47
42
|
|
48
43
|
## Configuration options
|
49
44
|
|
50
|
-
| Option
|
51
|
-
| ------
|
52
|
-
| driver_class
|
53
|
-
| driver_auto_commit
|
54
|
-
| driver_jar_path
|
55
|
-
| connection_string
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
45
|
+
| Option | Type | Description | Required? | Default |
|
46
|
+
| ------ | ---- | ----------- | --------- | ------- |
|
47
|
+
| driver_class | String | Specify a driver class if autoloading fails | No | |
|
48
|
+
| driver_auto_commit | Boolean | If the driver does not support auto commit, you should set this to false | No | True |
|
49
|
+
| driver_jar_path | String | File path to jar file containing your JDBC driver. This is optional, and all JDBC jars may be placed in $LOGSTASH_HOME/vendor/jar/jdbc instead. | No | |
|
50
|
+
| connection_string | String | JDBC connection URL | Yes | |
|
51
|
+
| connection_test | Boolean | Run a JDBC connection test. Some drivers do not function correctly, and you may need to disable the connection test to suppress an error. Cockroach with the postgres JDBC driver is such an example. | No | Yes |
|
52
|
+
| connection_test_query | String | Connection test and init query string, required for some JDBC drivers that don't support isValid(). Typically you'd set to this "SELECT 1" | No | |
|
53
|
+
| username | String | JDBC username - this is optional as it may be included in the connection string, for many drivers | No | |
|
54
|
+
| password | String | JDBC password - this is optional as it may be included in the connection string, for many drivers | No | |
|
55
|
+
| statement | Array | An array of strings representing the SQL statement to run. Index 0 is the SQL statement that is prepared, all other array entries are passed in as parameters (in order). A parameter may either be a property of the event (i.e. "@timestamp", or "host") or a formatted string (i.e. "%{host} - %{message}" or "%{message}"). If a key is passed then it will be automatically converted as required for insertion into SQL. If it's a formatted string then it will be passed in verbatim. | Yes | |
|
56
|
+
| unsafe_statement | Boolean | If yes, the statement is evaluated for event fields - this allows you to use dynamic table names, etc. **This is highly dangerous** and you should **not** use this unless you are 100% sure that the field(s) you are passing in are 100% safe. Failure to do so will result in possible SQL injections. Example statement: [ "insert into %{table_name_field} (column) values(?)", "fieldname" ] | No | False |
|
57
|
+
| max_pool_size | Number | Maximum number of connections to open to the SQL server at any 1 time | No | 5 |
|
58
|
+
| connection_timeout | Number | Number of milliseconds before a SQL connection is closed | No | 10000 |
|
59
|
+
| flush_size | Number | Maximum number of entries to buffer before sending to SQL - if this is reached before idle_flush_time | No | 1000 |
|
60
|
+
| max_flush_exceptions | Number | Number of sequential flushes which cause an exception, before the set of events are discarded. Set to a value less than 1 if you never want it to stop. This should be carefully configured with respect to retry_initial_interval and retry_max_interval, if your SQL server is not highly available | No | 10 |
|
61
|
+
| retry_initial_interval | Number | Number of seconds before the initial retry in the event of a failure. On each failure it will be doubled until it reaches retry_max_interval | No | 2 |
|
62
|
+
| retry_max_interval | Number | Maximum number of seconds between each retry | No | 128 |
|
63
|
+
| retry_sql_states | Array of strings | An array of custom SQL state codes you wish to retry until `max_flush_exceptions`. Useful if you're using a JDBC driver which returns retry-able, but non-standard SQL state codes in its exceptions. | No | [] |
|
64
|
+
| event_as_json_keyword | String | The magic key word that the plugin looks for to convert the entire event into a JSON object. As Logstash does not support this out of the box with its `sprintf` implementation, you can use whatever this field is set to in the statement parameters | No | @event |
|
65
|
+
| enable_event_as_json_keyword | Boolean | Enables the magic keyword set in the configuration option `event_as_json_keyword`. Without this enabled the plugin will not convert the `event_as_json_keyword` into JSON encoding of the entire event. | No | False |
|
65
66
|
|
66
67
|
## Example configurations
|
67
68
|
Example logstash configurations, can now be found in the examples directory. Where possible we try to link every configuration with a tested jar.
|
68
69
|
|
69
70
|
If you have a working sample configuration, for a DB that's not listed, pull requests are welcome.
|
71
|
+
|
72
|
+
## Development and Running tests
|
73
|
+
For development tests are recommended to run inside a virtual machine (Vagrantfile is included in the repo), as it requires
|
74
|
+
access to various database engines and could completely destroy any data in a live system.
|
75
|
+
|
76
|
+
If you have vagrant available (this is temporary whilst I'm hacking on v5 support. I'll make this more streamlined later):
|
77
|
+
- `vagrant up`
|
78
|
+
- `vagrant ssh`
|
79
|
+
- `cd /vagrant`
|
80
|
+
- `gem install bundler`
|
81
|
+
- `cd /vagrant && bundle install && bundle exec rake vendor && bundle exec rake install_jars`
|
82
|
+
- `./scripts/travis-before_script.sh && source ./scripts/travis-variables.sh`
|
83
|
+
- `bundle exec rspec`
|
84
|
+
|
85
|
+
## Releasing
|
86
|
+
- Update Changelog
|
87
|
+
- Bump version in gemspec
|
88
|
+
- Commit
|
89
|
+
- Create tag `git tag v<version-number-in-gemspec>`
|
90
|
+
- `bundle exec rake install_jars`
|
91
|
+
- `bundle exec rake pre_release_checks`
|
92
|
+
- `gem build logstash-output-jdbc.gemspec`
|
93
|
+
- `gem push`
|
data/THANKS.md
ADDED
@@ -0,0 +1,18 @@
|
|
1
|
+
logstash-output-jdbc is a project originally created by Karl Southern
|
2
|
+
(the_angry_angel), but there are a number of people that have contributed
|
3
|
+
or implemented key features over time. We do our best to keep this list
|
4
|
+
up-to-date, but you can also have a look at the nice contributor graphs
|
5
|
+
produced by GitHub: https://github.com/theangryangel/logstash-output-jdbc/graphs/contributors
|
6
|
+
|
7
|
+
* [hordijk](https://github.com/hordijk)
|
8
|
+
* [dmitryakadiamond](https://github.com/dmitryakadiamond)
|
9
|
+
* [MassimoSporchia](https://github.com/MassimoSporchia)
|
10
|
+
* [ebuildy](https://github.com/ebuildy)
|
11
|
+
* [kushtrimjunuzi](https://github.com/kushtrimjunuzi)
|
12
|
+
* [josemazo](https://github.com/josemazo)
|
13
|
+
* [aceoliver](https://github.com/aceoliver)
|
14
|
+
* [roflmao](https://github.com/roflmao)
|
15
|
+
* [onesuper](https://github.com/onesuper)
|
16
|
+
* [phr0gz](https://github.com/phr0gz)
|
17
|
+
* [jMonsinjon](https://github.com/jMonsinjon)
|
18
|
+
* [mlkmhd](https://github.com/mlkmhd)
|
Binary file
|
Binary file
|
@@ -1,5 +1,5 @@
|
|
1
1
|
# encoding: utf-8
|
2
2
|
require 'logstash/environment'
|
3
3
|
|
4
|
-
root_dir = File.expand_path(File.join(File.dirname(__FILE__),
|
5
|
-
LogStash::Environment.load_runtime_jars! File.join(root_dir,
|
4
|
+
root_dir = File.expand_path(File.join(File.dirname(__FILE__), '..'))
|
5
|
+
LogStash::Environment.load_runtime_jars! File.join(root_dir, 'vendor')
|
@@ -1,10 +1,12 @@
|
|
1
1
|
# encoding: utf-8
|
2
|
-
require
|
3
|
-
require
|
4
|
-
require
|
5
|
-
require
|
6
|
-
require
|
7
|
-
require
|
2
|
+
require 'logstash/outputs/base'
|
3
|
+
require 'logstash/namespace'
|
4
|
+
require 'concurrent'
|
5
|
+
require 'stud/interval'
|
6
|
+
require 'java'
|
7
|
+
require 'logstash-output-jdbc_jars'
|
8
|
+
require 'json'
|
9
|
+
require 'bigdecimal'
|
8
10
|
|
9
11
|
# Write events to a SQL engine, using JDBC.
|
10
12
|
#
|
@@ -12,8 +14,7 @@ require "logstash-output-jdbc_ring-buffer"
|
|
12
14
|
# includes correctly crafting the SQL statement, and matching the number of
|
13
15
|
# parameters correctly.
|
14
16
|
class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
|
15
|
-
|
16
|
-
include Stud::Buffer
|
17
|
+
concurrency :shared
|
17
18
|
|
18
19
|
STRFTIME_FMT = '%Y-%m-%d %T.%L'.freeze
|
19
20
|
|
@@ -33,137 +34,114 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
|
|
33
34
|
'58', # System Error
|
34
35
|
].freeze
|
35
36
|
|
36
|
-
config_name
|
37
|
+
config_name 'jdbc'
|
37
38
|
|
38
39
|
# Driver class - Reintroduced for https://github.com/theangryangel/logstash-output-jdbc/issues/26
|
39
|
-
config :driver_class, :
|
40
|
+
config :driver_class, validate: :string
|
40
41
|
|
41
42
|
# Does the JDBC driver support autocommit?
|
42
|
-
config :driver_auto_commit, :
|
43
|
+
config :driver_auto_commit, validate: :boolean, default: true, required: true
|
43
44
|
|
44
45
|
# Where to find the jar
|
45
46
|
# Defaults to not required, and to the original behaviour
|
46
|
-
config :driver_jar_path, :
|
47
|
+
config :driver_jar_path, validate: :string, required: false
|
47
48
|
|
48
49
|
# jdbc connection string
|
49
|
-
config :connection_string, :
|
50
|
+
config :connection_string, validate: :string, required: true
|
50
51
|
|
51
52
|
# jdbc username - optional, maybe in the connection string
|
52
|
-
config :username, :
|
53
|
+
config :username, validate: :string, required: false
|
53
54
|
|
54
55
|
# jdbc password - optional, maybe in the connection string
|
55
|
-
config :password, :
|
56
|
+
config :password, validate: :string, required: false
|
56
57
|
|
57
58
|
# [ "insert into table (message) values(?)", "%{message}" ]
|
58
|
-
config :statement, :
|
59
|
+
config :statement, validate: :array, required: true
|
59
60
|
|
60
61
|
# If this is an unsafe statement, use event.sprintf
|
61
|
-
# This also has potential performance penalties due to having to create a
|
62
|
+
# This also has potential performance penalties due to having to create a
|
62
63
|
# new statement for each event, rather than adding to the batch and issuing
|
63
64
|
# multiple inserts in 1 go
|
64
|
-
config :unsafe_statement, :
|
65
|
+
config :unsafe_statement, validate: :boolean, default: false
|
65
66
|
|
66
67
|
# Number of connections in the pool to maintain
|
67
|
-
config :max_pool_size, :
|
68
|
+
config :max_pool_size, validate: :number, default: 5
|
68
69
|
|
69
70
|
# Connection timeout
|
70
|
-
config :connection_timeout, :
|
71
|
+
config :connection_timeout, validate: :number, default: 10000
|
71
72
|
|
72
73
|
# We buffer a certain number of events before flushing that out to SQL.
|
73
74
|
# This setting controls how many events will be buffered before sending a
|
74
75
|
# batch of events.
|
75
|
-
config :flush_size, :
|
76
|
-
|
77
|
-
#
|
78
|
-
|
79
|
-
|
80
|
-
#
|
81
|
-
|
82
|
-
|
83
|
-
#
|
84
|
-
#
|
85
|
-
|
86
|
-
#
|
87
|
-
# If you change this value please ensure that you change
|
88
|
-
# max_flush_exceptions accordingly.
|
89
|
-
config :idle_flush_time, :validate => :number, :default => 1
|
90
|
-
|
91
|
-
# Maximum number of sequential flushes which encounter exceptions, before we stop retrying.
|
92
|
-
# If set to < 1, then it will infinitely retry.
|
93
|
-
#
|
94
|
-
# You should carefully tune this in relation to idle_flush_time if your SQL server
|
95
|
-
# is not highly available.
|
96
|
-
# i.e. If your idle_flush_time is 1, and your max_flush_exceptions is 200, and your SQL server takes
|
97
|
-
# longer than 200 seconds to reboot, then logstash will stop.
|
98
|
-
config :max_flush_exceptions, :validate => :number, :default => 0
|
76
|
+
config :flush_size, validate: :number, default: 1000
|
77
|
+
|
78
|
+
# Set initial interval in seconds between retries. Doubled on each retry up to `retry_max_interval`
|
79
|
+
config :retry_initial_interval, validate: :number, default: 2
|
80
|
+
|
81
|
+
# Maximum time between retries, in seconds
|
82
|
+
config :retry_max_interval, validate: :number, default: 128
|
83
|
+
|
84
|
+
# Any additional custom, retryable SQL state codes.
|
85
|
+
# Suitable for configuring retryable custom JDBC SQL state codes.
|
86
|
+
config :retry_sql_states, validate: :array, default: []
|
99
87
|
|
100
|
-
|
101
|
-
config :
|
88
|
+
# Run a connection test on start.
|
89
|
+
config :connection_test, validate: :boolean, default: true
|
90
|
+
|
91
|
+
# Connection test and init string, required for some JDBC endpoints
|
92
|
+
# notable phoenix-thin - see logstash-output-jdbc issue #60
|
93
|
+
config :connection_test_query, validate: :string, required: false
|
94
|
+
|
95
|
+
# Maximum number of sequential failed attempts, before we stop retrying.
|
96
|
+
# If set to < 1, then it will infinitely retry.
|
97
|
+
# At the default values this is a little over 10 minutes
|
98
|
+
config :max_flush_exceptions, validate: :number, default: 10
|
99
|
+
|
100
|
+
config :max_repeat_exceptions, obsolete: 'This has been replaced by max_flush_exceptions - which behaves slightly differently. Please check the documentation.'
|
101
|
+
config :max_repeat_exceptions_time, obsolete: 'This is no longer required'
|
102
|
+
config :idle_flush_time, obsolete: 'No longer necessary under Logstash v5'
|
103
|
+
|
104
|
+
# Allows the whole event to be converted to JSON
|
105
|
+
config :enable_event_as_json_keyword, validate: :boolean, default: false
|
106
|
+
|
107
|
+
# The magic key used to convert the whole event to JSON. If you need this, and you have the default in your events, you can use this to change your magic keyword.
|
108
|
+
config :event_as_json_keyword, validate: :string, default: '@event'
|
102
109
|
|
103
|
-
public
|
104
110
|
def register
|
105
|
-
@logger.info(
|
111
|
+
@logger.info('JDBC - Starting up')
|
106
112
|
|
107
113
|
load_jar_files!
|
108
114
|
|
109
|
-
@
|
115
|
+
@stopping = Concurrent::AtomicBoolean.new(false)
|
110
116
|
|
111
|
-
if
|
112
|
-
@logger.warn("JDBC - Flush size is set to > 1000")
|
113
|
-
end
|
117
|
+
@logger.warn('JDBC - Flush size is set to > 1000') if @flush_size > 1000
|
114
118
|
|
115
|
-
if @statement.
|
116
|
-
@logger.error(
|
119
|
+
if @statement.empty?
|
120
|
+
@logger.error('JDBC - No statement provided. Configuration error.')
|
117
121
|
end
|
118
122
|
|
119
|
-
if
|
123
|
+
if !@unsafe_statement && @statement.length < 2
|
120
124
|
@logger.error("JDBC - Statement has no parameters. No events will be inserted into SQL as you're not passing any event data. Likely configuration error.")
|
121
125
|
end
|
122
126
|
|
123
127
|
setup_and_test_pool!
|
124
|
-
|
125
|
-
buffer_initialize(
|
126
|
-
:max_items => @flush_size,
|
127
|
-
:max_interval => @idle_flush_time,
|
128
|
-
:logger => @logger
|
129
|
-
)
|
130
128
|
end
|
131
129
|
|
132
|
-
def
|
133
|
-
|
134
|
-
|
135
|
-
|
136
|
-
buffer_receive(event)
|
137
|
-
end
|
138
|
-
|
139
|
-
def flush(events, teardown=false)
|
140
|
-
if @unsafe_statement == true
|
141
|
-
unsafe_flush(events, teardown)
|
142
|
-
else
|
143
|
-
safe_flush(events, teardown)
|
130
|
+
def multi_receive(events)
|
131
|
+
events.each_slice(@flush_size) do |slice|
|
132
|
+
retrying_submit(slice)
|
144
133
|
end
|
145
134
|
end
|
146
135
|
|
147
|
-
def
|
148
|
-
|
149
|
-
|
150
|
-
@exceptions_tracker << e.class
|
151
|
-
|
152
|
-
if @exceptions_tracker.reject { |i| i.nil? }.count >= @max_flush_exceptions
|
153
|
-
@logger.error("JDBC - max_flush_exceptions has been reached")
|
154
|
-
raise LogStash::ShutdownSignal.new
|
155
|
-
end
|
156
|
-
end
|
157
|
-
|
158
|
-
def teardown
|
159
|
-
buffer_flush(:final => true)
|
160
|
-
@pool.close()
|
136
|
+
def close
|
137
|
+
@stopping.make_true
|
138
|
+
@pool.close
|
161
139
|
super
|
162
140
|
end
|
163
141
|
|
164
142
|
private
|
165
143
|
|
166
|
-
def setup_and_test_pool!
|
144
|
+
def setup_and_test_pool!
|
167
145
|
# Setup pool
|
168
146
|
@pool = Java::ComZaxxerHikari::HikariDataSource.new
|
169
147
|
|
@@ -180,121 +158,128 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
|
|
180
158
|
|
181
159
|
validate_connection_timeout = (@connection_timeout / 1000) / 2
|
182
160
|
|
161
|
+
if !@connection_test_query.nil? and @connection_test_query.length > 1
|
162
|
+
@pool.setConnectionTestQuery(@connection_test_query)
|
163
|
+
@pool.setConnectionInitSql(@connection_test_query)
|
164
|
+
end
|
165
|
+
|
166
|
+
return unless @connection_test
|
167
|
+
|
183
168
|
# Test connection
|
184
|
-
test_connection = @pool.getConnection
|
169
|
+
test_connection = @pool.getConnection
|
185
170
|
unless test_connection.isValid(validate_connection_timeout)
|
186
|
-
@logger.
|
171
|
+
@logger.warn('JDBC - Connection is not reporting as validate. Either connection is invalid, or driver is not getting the appropriate response.')
|
187
172
|
end
|
188
|
-
test_connection.close
|
173
|
+
test_connection.close
|
189
174
|
end
|
190
175
|
|
191
176
|
def load_jar_files!
|
192
177
|
# Load jar from driver path
|
193
178
|
unless @driver_jar_path.nil?
|
194
|
-
raise
|
179
|
+
raise LogStash::ConfigurationError, 'JDBC - Could not find jar file at given path. Check config.' unless File.exist? @driver_jar_path
|
195
180
|
require @driver_jar_path
|
196
181
|
return
|
197
182
|
end
|
198
183
|
|
199
184
|
# Revert original behaviour of loading from vendor directory
|
200
185
|
# if no path given
|
201
|
-
if ENV['LOGSTASH_HOME']
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
186
|
+
jarpath = if ENV['LOGSTASH_HOME']
|
187
|
+
File.join(ENV['LOGSTASH_HOME'], '/vendor/jar/jdbc/*.jar')
|
188
|
+
else
|
189
|
+
File.join(File.dirname(__FILE__), '../../../vendor/jar/jdbc/*.jar')
|
190
|
+
end
|
206
191
|
|
207
|
-
@logger.
|
192
|
+
@logger.trace('JDBC - jarpath', path: jarpath)
|
208
193
|
|
209
194
|
jars = Dir[jarpath]
|
210
|
-
raise
|
195
|
+
raise LogStash::ConfigurationError, 'JDBC - No jars found. Have you read the README?' if jars.empty?
|
211
196
|
|
212
197
|
jars.each do |jar|
|
213
|
-
@logger.
|
198
|
+
@logger.trace('JDBC - Loaded jar', jar: jar)
|
214
199
|
require jar
|
215
200
|
end
|
216
201
|
end
|
217
202
|
|
218
|
-
def
|
203
|
+
def submit(events)
|
219
204
|
connection = nil
|
220
205
|
statement = nil
|
221
|
-
|
206
|
+
events_to_retry = []
|
207
|
+
|
222
208
|
begin
|
223
|
-
connection = @pool.getConnection
|
209
|
+
connection = @pool.getConnection
|
224
210
|
rescue => e
|
225
|
-
log_jdbc_exception(e, true)
|
226
|
-
|
211
|
+
log_jdbc_exception(e, true, nil)
|
212
|
+
# If a connection is not available, then the server has gone away
|
213
|
+
# We're not counting that towards our retry count.
|
214
|
+
return events, false
|
227
215
|
end
|
228
216
|
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
statement
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
@exceptions_tracker << nil
|
243
|
-
rescue => e
|
244
|
-
if retry_exception?(e)
|
245
|
-
raise
|
217
|
+
events.each do |event|
|
218
|
+
begin
|
219
|
+
statement = connection.prepareStatement(
|
220
|
+
(@unsafe_statement == true) ? event.sprintf(@statement[0]) : @statement[0]
|
221
|
+
)
|
222
|
+
statement = add_statement_event_params(statement, event) if @statement.length > 1
|
223
|
+
statement.execute
|
224
|
+
rescue => e
|
225
|
+
if retry_exception?(e, event.to_json())
|
226
|
+
events_to_retry.push(event)
|
227
|
+
end
|
228
|
+
ensure
|
229
|
+
statement.close unless statement.nil?
|
246
230
|
end
|
247
|
-
ensure
|
248
|
-
statement.close() unless statement.nil?
|
249
|
-
connection.close() unless connection.nil?
|
250
231
|
end
|
232
|
+
|
233
|
+
connection.close unless connection.nil?
|
234
|
+
|
235
|
+
return events_to_retry, true
|
251
236
|
end
|
252
237
|
|
253
|
-
def
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
connection = @pool.getConnection()
|
258
|
-
rescue => e
|
259
|
-
log_jdbc_exception(e, true)
|
260
|
-
raise
|
261
|
-
end
|
238
|
+
def retrying_submit(actions)
|
239
|
+
# Initially we submit the full list of actions
|
240
|
+
submit_actions = actions
|
241
|
+
count_as_attempt = true
|
262
242
|
|
263
|
-
|
264
|
-
events.each do |event|
|
265
|
-
next if event.cancelled?
|
243
|
+
attempts = 1
|
266
244
|
|
267
|
-
|
268
|
-
|
245
|
+
sleep_interval = @retry_initial_interval
|
246
|
+
while @stopping.false? and (submit_actions and !submit_actions.empty?)
|
247
|
+
return if !submit_actions || submit_actions.empty? # If everything's a success we move along
|
248
|
+
# We retry whatever didn't succeed
|
249
|
+
submit_actions, count_as_attempt = submit(submit_actions)
|
269
250
|
|
270
|
-
|
251
|
+
# Everything was a success!
|
252
|
+
break if !submit_actions || submit_actions.empty?
|
271
253
|
|
272
|
-
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
raise
|
254
|
+
if @max_flush_exceptions > 0 and count_as_attempt == true
|
255
|
+
attempts += 1
|
256
|
+
|
257
|
+
if attempts > @max_flush_exceptions
|
258
|
+
@logger.error("JDBC - max_flush_exceptions has been reached. #{submit_actions.length} events have been unable to be sent to SQL and are being dropped. See previously logged exceptions for details.")
|
259
|
+
break
|
260
|
+
end
|
280
261
|
end
|
281
|
-
|
282
|
-
|
283
|
-
|
262
|
+
|
263
|
+
# If we're retrying the action sleep for the recommended interval
|
264
|
+
# Double the interval for the next time through to achieve exponential backoff
|
265
|
+
Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
|
266
|
+
sleep_interval = next_sleep_interval(sleep_interval)
|
284
267
|
end
|
285
268
|
end
|
286
269
|
|
287
270
|
def add_statement_event_params(statement, event)
|
288
271
|
@statement[1..-1].each_with_index do |i, idx|
|
289
|
-
if i.is_a? String
|
290
|
-
value = event
|
272
|
+
if @enable_event_as_json_keyword == true and i.is_a? String and i == @event_as_json_keyword
|
273
|
+
value = event.to_json
|
274
|
+
elsif i.is_a? String
|
275
|
+
value = event.get(i)
|
291
276
|
if value.nil? and i =~ /%\{/
|
292
277
|
value = event.sprintf(i)
|
293
278
|
end
|
294
279
|
else
|
295
280
|
value = i
|
296
281
|
end
|
297
|
-
|
282
|
+
|
298
283
|
case value
|
299
284
|
when Time
|
300
285
|
# See LogStash::Timestamp, below, for the why behind strftime.
|
@@ -309,11 +294,19 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
|
|
309
294
|
# strftime appears to be the most reliable across drivers.
|
310
295
|
statement.setString(idx + 1, value.time.strftime(STRFTIME_FMT))
|
311
296
|
when Fixnum, Integer
|
312
|
-
|
297
|
+
if value > 2147483647 or value < -2147483648
|
298
|
+
statement.setLong(idx + 1, value)
|
299
|
+
else
|
300
|
+
statement.setInt(idx + 1, value)
|
301
|
+
end
|
302
|
+
when BigDecimal
|
303
|
+
statement.setBigDecimal(idx + 1, value.to_java)
|
313
304
|
when Float
|
314
305
|
statement.setFloat(idx + 1, value)
|
315
306
|
when String
|
316
307
|
statement.setString(idx + 1, value)
|
308
|
+
when Array, Hash
|
309
|
+
statement.setString(idx + 1, value.to_json)
|
317
310
|
when true, false
|
318
311
|
statement.setBoolean(idx + 1, value)
|
319
312
|
else
|
@@ -324,14 +317,23 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
|
|
324
317
|
statement
|
325
318
|
end
|
326
319
|
|
320
|
+
def retry_exception?(exception, event)
|
321
|
+
retrying = (exception.respond_to? 'getSQLState' and (RETRYABLE_SQLSTATE_CLASSES.include?(exception.getSQLState.to_s[0,2]) or @retry_sql_states.include?(exception.getSQLState)))
|
322
|
+
log_jdbc_exception(exception, retrying, event)
|
327
323
|
|
328
|
-
|
324
|
+
retrying
|
325
|
+
end
|
326
|
+
|
327
|
+
def log_jdbc_exception(exception, retrying, event)
|
329
328
|
current_exception = exception
|
330
|
-
log_text = 'JDBC - Exception. ' + (retrying ? 'Retrying' : 'Not retrying')
|
329
|
+
log_text = 'JDBC - Exception. ' + (retrying ? 'Retrying' : 'Not retrying')
|
330
|
+
|
331
331
|
log_method = (retrying ? 'warn' : 'error')
|
332
332
|
|
333
333
|
loop do
|
334
|
-
|
334
|
+
# TODO reformat event output so that it only shows the fields necessary.
|
335
|
+
|
336
|
+
@logger.send(log_method, log_text, :exception => current_exception, :statement => @statement[0], :event => event)
|
335
337
|
|
336
338
|
if current_exception.respond_to? 'getNextException'
|
337
339
|
current_exception = current_exception.getNextException()
|
@@ -343,10 +345,8 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
|
|
343
345
|
end
|
344
346
|
end
|
345
347
|
|
346
|
-
def
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
retrying
|
348
|
+
def next_sleep_interval(current_interval)
|
349
|
+
doubled = current_interval * 2
|
350
|
+
doubled > @retry_max_interval ? @retry_max_interval : doubled
|
351
351
|
end
|
352
352
|
end # class LogStash::Outputs::jdbc
|