logstash-output-jdbc 0.2.10 → 5.4.0

Files changed (42)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +64 -0
  3. data/Gemfile +11 -0
  4. data/README.md +54 -30
  5. data/THANKS.md +18 -0
  6. data/lib/com/zaxxer/HikariCP/2.4.7/HikariCP-2.4.7.jar +0 -0
  7. data/lib/com/zaxxer/HikariCP/2.7.2/HikariCP-2.7.2.jar +0 -0
  8. data/lib/logstash-output-jdbc_jars.rb +2 -2
  9. data/lib/logstash/outputs/jdbc.rb +163 -163
  10. data/lib/org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar +0 -0
  11. data/lib/org/apache/logging/log4j/log4j-api/2.9.1/log4j-api-2.9.1.jar +0 -0
  12. data/lib/org/apache/logging/log4j/log4j-core/2.9.1/log4j-core-2.9.1.jar +0 -0
  13. data/lib/org/apache/logging/log4j/log4j-slf4j-impl/2.6.2/log4j-slf4j-impl-2.6.2.jar +0 -0
  14. data/lib/org/apache/logging/log4j/log4j-slf4j-impl/2.9.1/log4j-slf4j-impl-2.9.1.jar +0 -0
  15. data/lib/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar +0 -0
  16. data/lib/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar +0 -0
  17. data/logstash-output-jdbc.gemspec +32 -0
  18. data/spec/jdbc_spec_helper.rb +216 -0
  19. data/spec/outputs/jdbc_derby_spec.rb +33 -0
  20. data/spec/outputs/jdbc_mysql_spec.rb +24 -0
  21. data/spec/outputs/jdbc_postgres_spec.rb +41 -0
  22. data/spec/outputs/jdbc_spec.rb +4 -87
  23. data/spec/outputs/jdbc_sqlite_spec.rb +26 -0
  24. data/vendor/jar-dependencies/runtime-jars/HikariCP-2.7.2.jar +0 -0
  25. data/vendor/jar-dependencies/runtime-jars/log4j-api-2.6.2.jar +0 -0
  26. data/vendor/jar-dependencies/runtime-jars/log4j-slf4j-impl-2.6.2.jar +0 -0
  27. data/vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.25.jar +0 -0
  28. metadata +94 -53
  29. data/LICENSE.txt +0 -21
  30. data/lib/com/zaxxer/HikariCP/2.4.2/HikariCP-2.4.2.jar +0 -0
  31. data/lib/com/zaxxer/HikariCP/2.4.6/HikariCP-2.4.6.jar +0 -0
  32. data/lib/logstash-output-jdbc_ring-buffer.rb +0 -17
  33. data/lib/org/slf4j/slf4j-api/1.7.12/slf4j-api-1.7.12.jar +0 -0
  34. data/lib/org/slf4j/slf4j-api/1.7.16/slf4j-api-1.7.16.jar +0 -0
  35. data/lib/org/slf4j/slf4j-log4j12/1.7.13/slf4j-log4j12-1.7.13.jar +0 -0
  36. data/vendor/jar-dependencies/runtime-jars/HikariCP-2.4.2.jar +0 -0
  37. data/vendor/jar-dependencies/runtime-jars/HikariCP-2.4.6.jar +0 -0
  38. data/vendor/jar-dependencies/runtime-jars/log4j-1.2.17.jar +0 -0
  39. data/vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.13.jar +0 -0
  40. data/vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.16.jar +0 -0
  41. data/vendor/jar-dependencies/runtime-jars/slf4j-log4j12-1.7.21.jar +0 -0
  42. data/vendor/jar-dependencies/runtime-jars/slf4j-nop-1.7.13.jar +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 42d30ecc15a727d77d8524f69d4e0a72609aee39
- data.tar.gz: 92cda3714612b6cfea4bf7b29626456a16bb0bf1
+ metadata.gz: 2c9b504aa52e7be8ac8e76a75e925332a1978bdc
+ data.tar.gz: 6acb7adc74e8315ec06981677cc58e7cf9b25009
  SHA512:
- metadata.gz: 2dd985a6dc5042ac302d4f5d0623f3c252c3de98f2c3c41ab6da69fff1156a6cf65d2e3a6e4412a02cf63b8eab1f3392a5a39b0f8d30b28a15a53a76b87afef7
- data.tar.gz: 91c071c056330b22461f96b8874bc8fb9a0c2065d34d52b66622d74d6f791ea0f462be65a09e33eb4dbc9da707dd26442c591530b4ee0550454c9262e1e7721e
+ metadata.gz: cad42066d35d1b1f2457a7aa5a12c5e28471786830a4ec97031faccbddcda2bd1409a61dd7f9e9ffa959a1bf403601ddf3722bf64eec82dcf8263c9027cfa011
+ data.tar.gz: d0cea89d20fef7ccb53fd826bde6e7edd26b523aaa8d484d509171f7d1b7938b2e11b95364d76e2df0fdd3be26390f9280e91dd669755e84d4980f636d831b42
data/CHANGELOG.md ADDED
@@ -0,0 +1,64 @@
+ # Change Log
+ All notable changes to this project will be documented in this file, from 0.2.0.
+
+ ## [5.3.0] - 2017-11-08
+ - Adds configuration options `enable_event_as_json_keyword` and `event_as_json_keyword`
+ - Adds BigDecimal support
+ - Adds additional logging for debugging purposes (with thanks to @mlkmhd's work)
+
+ ## [5.2.1] - 2017-04-09
+ - Adds Array and Hash to_json support for non-sprintf syntax
+
+ ## [5.2.0] - 2017-04-01
+ - Upgrades HikariCP to latest
+ - Fixes HikariCP logging integration issues
+
+ ## [5.1.0] - 2016-12-17
+ - phoenix-thin fixes for issue #60
+
+ ## [5.0.0] - 2016-11-03
+ - logstash v5 support
+
+ ## [0.3.1] - 2016-08-28
+ - Adds connection_test configuration option, to prevent the connection test from occurring, allowing the error to be suppressed.
+ Useful for cockroachdb deployments. https://github.com/theangryangel/logstash-output-jdbc/issues/53
+
+ ## [0.3.0] - 2016-07-24
+ - Brings tests from v5 branch, providing greater coverage
+ - Removes bulk update support, due to inconsistent behaviour
+ - Plugin now marked as threadsafe, meaning only 1 instance per-Logstash
+ - Raises default max_pool_size to match the default number of workers (1 connection per worker)
+
+ ## [0.2.10] - 2016-07-07
+ - Support non-string entries in statement array
+ - Adds backtrace to exception logging
+
+ ## [0.2.9] - 2016-06-29
+ - Fix NameError exception.
+ - Moved log_jdbc_exception calls
+
+ ## [0.2.7] - 2016-05-29
+ - Backport retry exception logic from v5 branch
+ - Backport improved timestamp compatibility from v5 branch
+
+ ## [0.2.6] - 2016-05-02
+ - Fix for exception infinite loop
+
+ ## [0.2.5] - 2016-04-11
+ ### Added
+ - Basic tests running against DerbyDB
+ - Fix for converting Logstash::Timestamp to iso8601 from @hordijk
+
+ ## [0.2.4] - 2016-04-07
+ - Documentation fixes from @hordijk
+
+ ## [0.2.3] - 2016-02-16
+ - Bug fixes
+
+ ## [0.2.2] - 2015-12-30
+ - Bug fixes
+
+ ## [0.2.1] - 2015-12-22
+ - Connection pooling support added through HikariCP
+ - Support for unsafe statement handling (allowing dynamic queries)
+ - Altered exception handling to now count sequential flushes with exceptions thrown
data/Gemfile ADDED
@@ -0,0 +1,11 @@
+ source 'https://rubygems.org'
+
+ gemspec
+
+ logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
+ use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
+
+ if Dir.exist?(logstash_path) && use_logstash_source
+   gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
+   gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
+ end
data/README.md CHANGED
@@ -1,6 +1,6 @@
  # logstash-output-jdbc

- [![Build Status](https://travis-ci.org/theangryangel/logstash-output-jdbc.svg?branch=master)](https://travis-ci.org/theangryangel/logstash-output-jdbc)
+ [![Build Status](https://travis-ci.org/theangryangel/logstash-output-jdbc.svg?branch=master)](https://travis-ci.org/theangryangel/logstash-output-jdbc) [![Flattr this git repo](http://api.flattr.com/button/flattr-badge-large.png)](https://flattr.com/submit/auto?user_id=the_angry_angel&url=https://github.com/the_angry_angel/logstash-output-jdbc&title=logstash-output-jdbc&language=&tags=github&category=software)

  This plugin is provided as an external plugin and is not part of the Logstash project.

@@ -14,56 +14,80 @@ If you do find this works for a JDBC driver without an example, let me know and
  This plugin does not bundle any JDBC jar files, and does expect them to be in a
  particular location. Please ensure you read the 4 installation lines below.

- ## ChangeLog
+ ## Support & release schedule
+ I no longer have time at work to maintain this plugin in step with Logstash's releases, and I am not completely immersed in the Logstash ecosystem. If something is broken for you I will do my best to help, but I cannot guarantee timeframes.
+
+ Pull requests are always welcome.
+
+ ## Changelog
  See CHANGELOG.md

  ## Versions
  Released versions are available via rubygems, and typically tagged.

  For development:
- - See master branch for logstash v5
+ - See master branch for logstash v5 & v6 :warning: This is untested under Logstash 6.3 at this time, and there has been 1 unverified report of an issue. Please use at your own risk until I can find the time to evaluate and test 6.3.
  - See v2.x branch for logstash v2
  - See v1.5 branch for logstash v1.5
  - See v1.4 branch for logstash 1.4

  ## Installation
- - Run `bin/plugin install logstash-output-jdbc` in your logstash installation directory
+ - Run `bin/logstash-plugin install logstash-output-jdbc` in your logstash installation directory
  - Now either:
  - Use driver_jar_path in your configuration to specify a path to your jar file
  - Or:
  - Create the directory vendor/jar/jdbc in your logstash installation (`mkdir -p vendor/jar/jdbc/`)
  - Add JDBC jar files to vendor/jar/jdbc in your logstash installation
- - And then configure (examples below)
-
- ## Running tests
- At this time tests only run against Derby, in an in-memory database.
- Acceptance tests for individual database engines will be added over time.
-
- Assuming valid jruby is installed
- - First time, issue `jruby -S bundle install` to install dependencies
- - Next, download Derby jar from https://db.apache.org/derby/
- - Run the tests `JDBC_DERBY_JAR=path/to/derby.jar jruby -S rspec`
- - Optionally add the `JDBC_DEBUG=1` env variable to add logging to stdout
+ - And then configure (examples can be found in the examples directory)

  ## Configuration options

- | Option | Type | Description | Required? | Default |
- | ------ | ---- | ----------- | --------- | ------- |
- | driver_class | String | Specify a driver class if autoloading fails | No | |
- | driver_auto_commit | Boolean | If the driver does not support auto commit, you should set this to false | No | True |
- | driver_jar_path | String | File path to jar file containing your JDBC driver. This is optional, and all JDBC jars may be placed in $LOGSTASH_HOME/vendor/jar/jdbc instead. | No | |
- | connection_string | String | JDBC connection URL | Yes | |
- | username | String | JDBC username - this is optional as it may be included in the connection string, for many drivers | No | |
- | password | String | JDBC password - this is optional as it may be included in the connection string, for many drivers | No | |
- | statement | Array | An array of strings representing the SQL statement to run. Index 0 is the SQL statement that is prepared, all other array entries are passed in as parameters (in order). A parameter may either be a property of the event (i.e. "@timestamp", or "host") or a formatted string (i.e. "%{host} - %{message}" or "%{message}"). If a key is passed then it will be automatically converted as required for insertion into SQL. If it's a formatted string then it will be passed in verbatim. | Yes | |
- | unsafe_statement | Boolean | If yes, the statement is evaluated for event fields - this allows you to use dynamic table names, etc. **This is highly dangerous** and you should **not** use this unless you are 100% sure that the field(s) you are passing in are 100% safe. Failure to do so will result in possible SQL injections. Please be aware that there is also a potential performance penalty as each event must be evaluated and inserted into SQL one at a time, where as when this is false multiple events are inserted at once. Example statement: [ "insert into %{table_name_field} (column) values(?)", "fieldname" ] | No | False |
- | max_pool_size | Number | Maximum number of connections to open to the SQL server at any 1 time | No | 5 |
- | connection_timeout | Number | Number of seconds before a SQL connection is closed | No | 2800 |
- | flush_size | Number | Maximum number of entries to buffer before sending to SQL - if this is reached before idle_flush_time | No | 1000 |
- | idle_flush_time | Number | Number of idle seconds before sending data to SQL - even if the flush_size has not yet been reached | No | 1 |
- | max_flush_exceptions | Number | Number of sequential flushes which cause an exception, before we stop logstash. Set to a value less than 1 if you never want it to stop. This should be carefully configured with relation to idle_flush_time if your SQL instance is not highly available. | No | 0 |
+ | Option | Type | Description | Required? | Default |
+ | ------ | ---- | ----------- | --------- | ------- |
+ | driver_class | String | Specify a driver class if autoloading fails | No | |
+ | driver_auto_commit | Boolean | If the driver does not support auto commit, you should set this to false | No | True |
+ | driver_jar_path | String | File path to jar file containing your JDBC driver. This is optional, and all JDBC jars may be placed in $LOGSTASH_HOME/vendor/jar/jdbc instead. | No | |
+ | connection_string | String | JDBC connection URL | Yes | |
+ | connection_test | Boolean | Run a JDBC connection test. Some drivers do not function correctly, and you may need to disable the connection test to suppress an error. Cockroach with the postgres JDBC driver is such an example. | No | Yes |
+ | connection_test_query | String | Connection test and init query string, required for some JDBC drivers that don't support isValid(). Typically you'd set this to "SELECT 1" | No | |
+ | username | String | JDBC username - this is optional as it may be included in the connection string, for many drivers | No | |
+ | password | String | JDBC password - this is optional as it may be included in the connection string, for many drivers | No | |
+ | statement | Array | An array of strings representing the SQL statement to run. Index 0 is the SQL statement that is prepared, all other array entries are passed in as parameters (in order). A parameter may either be a property of the event (e.g. "@timestamp", or "host") or a formatted string (e.g. "%{host} - %{message}" or "%{message}"). If a key is passed then it will be automatically converted as required for insertion into SQL. If it's a formatted string then it will be passed in verbatim. | Yes | |
+ | unsafe_statement | Boolean | If yes, the statement is evaluated for event fields - this allows you to use dynamic table names, etc. **This is highly dangerous** and you should **not** use this unless you are 100% sure that the field(s) you are passing in are 100% safe. Failure to do so will result in possible SQL injections. Example statement: [ "insert into %{table_name_field} (column) values(?)", "fieldname" ] | No | False |
+ | max_pool_size | Number | Maximum number of connections to open to the SQL server at any 1 time | No | 5 |
+ | connection_timeout | Number | Number of milliseconds before a SQL connection is closed | No | 10000 |
+ | flush_size | Number | Maximum number of entries to buffer before sending to SQL - if this is reached before idle_flush_time | No | 1000 |
+ | max_flush_exceptions | Number | Number of sequential flushes which cause an exception, before the set of events is discarded. Set to a value less than 1 if you never want it to stop. This should be carefully configured with respect to retry_initial_interval and retry_max_interval, if your SQL server is not highly available | No | 10 |
+ | retry_initial_interval | Number | Number of seconds before the initial retry in the event of a failure. On each failure it will be doubled until it reaches retry_max_interval | No | 2 |
+ | retry_max_interval | Number | Maximum number of seconds between each retry | No | 128 |
+ | retry_sql_states | Array of strings | An array of custom SQL state codes you wish to retry until `max_flush_exceptions`. Useful if you're using a JDBC driver which returns retryable, but non-standard SQL state codes in its exceptions. | No | [] |
+ | event_as_json_keyword | String | The magic keyword that the plugin looks for to convert the entire event into a JSON object. As Logstash does not support this out of the box with its `sprintf` implementation, you can use whatever this field is set to in the statement parameters | No | @event |
+ | enable_event_as_json_keyword | Boolean | Enables the magic keyword set in the configuration option `event_as_json_keyword`. Without this enabled the plugin will not convert the `event_as_json_keyword` into JSON encoding of the entire event. | No | False |

  ## Example configurations
  Example logstash configurations can now be found in the examples directory. Where possible we try to link every configuration with a tested jar.

  If you have a working sample configuration for a DB that's not listed, pull requests are welcome.
+
+ ## Development and Running tests
+ For development, tests are recommended to be run inside a virtual machine (a Vagrantfile is included in the repo), as they require
+ access to various database engines and could completely destroy any data in a live system.
+
+ If you have vagrant available (this is temporary whilst I'm hacking on v5 support. I'll make this more streamlined later):
+ - `vagrant up`
+ - `vagrant ssh`
+ - `cd /vagrant`
+ - `gem install bundler`
+ - `cd /vagrant && bundle install && bundle exec rake vendor && bundle exec rake install_jars`
+ - `./scripts/travis-before_script.sh && source ./scripts/travis-variables.sh`
+ - `bundle exec rspec`
+
+ ## Releasing
+ - Update Changelog
+ - Bump version in gemspec
+ - Commit
+ - Create tag `git tag v<version-number-in-gemspec>`
+ - `bundle exec rake install_jars`
+ - `bundle exec rake pre_release_checks`
+ - `gem build logstash-output-jdbc.gemspec`
+ - `gem push`
data/THANKS.md ADDED
@@ -0,0 +1,18 @@
+ logstash-output-jdbc is a project originally created by Karl Southern
+ (the_angry_angel), but there are a number of people that have contributed
+ or implemented key features over time. We do our best to keep this list
+ up-to-date, but you can also have a look at the nice contributor graphs
+ produced by GitHub: https://github.com/theangryangel/logstash-output-jdbc/graphs/contributors
+
+ * [hordijk](https://github.com/hordijk)
+ * [dmitryakadiamond](https://github.com/dmitryakadiamond)
+ * [MassimoSporchia](https://github.com/MassimoSporchia)
+ * [ebuildy](https://github.com/ebuildy)
+ * [kushtrimjunuzi](https://github.com/kushtrimjunuzi)
+ * [josemazo](https://github.com/josemazo)
+ * [aceoliver](https://github.com/aceoliver)
+ * [roflmao](https://github.com/roflmao)
+ * [onesuper](https://github.com/onesuper)
+ * [phr0gz](https://github.com/phr0gz)
+ * [jMonsinjon](https://github.com/jMonsinjon)
+ * [mlkmhd](https://github.com/mlkmhd)
data/lib/logstash-output-jdbc_jars.rb CHANGED
@@ -1,5 +1,5 @@
  # encoding: utf-8
  require 'logstash/environment'

- root_dir = File.expand_path(File.join(File.dirname(__FILE__), ".."))
- LogStash::Environment.load_runtime_jars! File.join(root_dir, "vendor")
+ root_dir = File.expand_path(File.join(File.dirname(__FILE__), '..'))
+ LogStash::Environment.load_runtime_jars! File.join(root_dir, 'vendor')
data/lib/logstash/outputs/jdbc.rb CHANGED
@@ -1,10 +1,12 @@
  # encoding: utf-8
- require "logstash/outputs/base"
- require "logstash/namespace"
- require "stud/buffer"
- require "java"
- require "logstash-output-jdbc_jars"
- require "logstash-output-jdbc_ring-buffer"
+ require 'logstash/outputs/base'
+ require 'logstash/namespace'
+ require 'concurrent'
+ require 'stud/interval'
+ require 'java'
+ require 'logstash-output-jdbc_jars'
+ require 'json'
+ require 'bigdecimal'

  # Write events to a SQL engine, using JDBC.
  #
@@ -12,8 +14,7 @@ require "logstash-output-jdbc_ring-buffer"
  # includes correctly crafting the SQL statement, and matching the number of
  # parameters correctly.
  class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
- # Adds buffer support
- include Stud::Buffer
+ concurrency :shared

  STRFTIME_FMT = '%Y-%m-%d %T.%L'.freeze

@@ -33,137 +34,114 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  '58', # System Error
  ].freeze

- config_name "jdbc"
+ config_name 'jdbc'

  # Driver class - Reintroduced for https://github.com/theangryangel/logstash-output-jdbc/issues/26
- config :driver_class, :validate => :string
+ config :driver_class, validate: :string

  # Does the JDBC driver support autocommit?
- config :driver_auto_commit, :validate => :boolean, :default => true, :required => true
+ config :driver_auto_commit, validate: :boolean, default: true, required: true

  # Where to find the jar
  # Defaults to not required, and to the original behaviour
- config :driver_jar_path, :validate => :string, :required => false
+ config :driver_jar_path, validate: :string, required: false

  # jdbc connection string
- config :connection_string, :validate => :string, :required => true
+ config :connection_string, validate: :string, required: true

  # jdbc username - optional, maybe in the connection string
- config :username, :validate => :string, :required => false
+ config :username, validate: :string, required: false

  # jdbc password - optional, maybe in the connection string
- config :password, :validate => :string, :required => false
+ config :password, validate: :string, required: false

  # [ "insert into table (message) values(?)", "%{message}" ]
- config :statement, :validate => :array, :required => true
+ config :statement, validate: :array, required: true

  # If this is an unsafe statement, use event.sprintf
- # This also has potential performance penalties due to having to create a
+ # This also has potential performance penalties due to having to create a
  # new statement for each event, rather than adding to the batch and issuing
  # multiple inserts in 1 go
- config :unsafe_statement, :validate => :boolean, :default => false
+ config :unsafe_statement, validate: :boolean, default: false

  # Number of connections in the pool to maintain
- config :max_pool_size, :validate => :number, :default => 5
+ config :max_pool_size, validate: :number, default: 5

  # Connection timeout
- config :connection_timeout, :validate => :number, :default => 10000
+ config :connection_timeout, validate: :number, default: 10000

  # We buffer a certain number of events before flushing that out to SQL.
  # This setting controls how many events will be buffered before sending a
  # batch of events.
- config :flush_size, :validate => :number, :default => 1000
-
- # The amount of time since last flush before a flush is forced.
- #
- # This setting helps ensure slow event rates don't get stuck in Logstash.
- # For example, if your `flush_size` is 100, and you have received 10 events,
- # and it has been more than `idle_flush_time` seconds since the last flush,
- # Logstash will flush those 10 events automatically.
- #
- # This helps keep both fast and slow log streams moving along in
- # a timely manner.
- #
- # If you change this value please ensure that you change
- # max_flush_exceptions accordingly.
- config :idle_flush_time, :validate => :number, :default => 1
-
- # Maximum number of sequential flushes which encounter exceptions, before we stop retrying.
- # If set to < 1, then it will infinitely retry.
- #
- # You should carefully tune this in relation to idle_flush_time if your SQL server
- # is not highly available.
- # i.e. If your idle_flush_time is 1, and your max_flush_exceptions is 200, and your SQL server takes
- # longer than 200 seconds to reboot, then logstash will stop.
- config :max_flush_exceptions, :validate => :number, :default => 0
+ config :flush_size, validate: :number, default: 1000
+
+ # Set initial interval in seconds between retries. Doubled on each retry up to `retry_max_interval`
+ config :retry_initial_interval, validate: :number, default: 2
+
+ # Maximum time between retries, in seconds
+ config :retry_max_interval, validate: :number, default: 128
+
+ # Any additional custom, retryable SQL state codes.
+ # Suitable for configuring retryable custom JDBC SQL state codes.
+ config :retry_sql_states, validate: :array, default: []

- config :max_repeat_exceptions, :obsolete => "This has been replaced by max_flush_exceptions - which behaves slightly differently. Please check the documentation."
- config :max_repeat_exceptions_time, :obsolete => "This is no longer required"
+ # Run a connection test on start.
+ config :connection_test, validate: :boolean, default: true
+
+ # Connection test and init string, required for some JDBC endpoints
+ # notable phoenix-thin - see logstash-output-jdbc issue #60
+ config :connection_test_query, validate: :string, required: false
+
+ # Maximum number of sequential failed attempts, before we stop retrying.
+ # If set to < 1, then it will infinitely retry.
+ # At the default values this is a little over 10 minutes
+ config :max_flush_exceptions, validate: :number, default: 10
+
+ config :max_repeat_exceptions, obsolete: 'This has been replaced by max_flush_exceptions - which behaves slightly differently. Please check the documentation.'
+ config :max_repeat_exceptions_time, obsolete: 'This is no longer required'
+ config :idle_flush_time, obsolete: 'No longer necessary under Logstash v5'
+
+ # Allows the whole event to be converted to JSON
+ config :enable_event_as_json_keyword, validate: :boolean, default: false
+
+ # The magic key used to convert the whole event to JSON. If you need this, and you have the default in your events, you can use this to change your magic keyword.
+ config :event_as_json_keyword, validate: :string, default: '@event'

- public
  def register
- @logger.info("JDBC - Starting up")
+ @logger.info('JDBC - Starting up')

  load_jar_files!

- @exceptions_tracker = RingBuffer.new(@max_flush_exceptions)
+ @stopping = Concurrent::AtomicBoolean.new(false)

- if (@flush_size > 1000)
- @logger.warn("JDBC - Flush size is set to > 1000")
- end
+ @logger.warn('JDBC - Flush size is set to > 1000') if @flush_size > 1000

- if @statement.length < 1
- @logger.error("JDBC - No statement provided. Configuration error.")
+ if @statement.empty?
+ @logger.error('JDBC - No statement provided. Configuration error.')
  end

- if (!@unsafe_statement and @statement.length < 2)
+ if !@unsafe_statement && @statement.length < 2
  @logger.error("JDBC - Statement has no parameters. No events will be inserted into SQL as you're not passing any event data. Likely configuration error.")
  end

  setup_and_test_pool!
-
- buffer_initialize(
- :max_items => @flush_size,
- :max_interval => @idle_flush_time,
- :logger => @logger
- )
  end

- def receive(event)
- return unless output?(event) or event.cancelled?
- return unless @statement.length > 0
-
- buffer_receive(event)
- end
-
- def flush(events, teardown=false)
- if @unsafe_statement == true
- unsafe_flush(events, teardown)
- else
- safe_flush(events, teardown)
+ def multi_receive(events)
+ events.each_slice(@flush_size) do |slice|
+ retrying_submit(slice)
  end
  end

- def on_flush_error(e)
- return if @max_flush_exceptions < 1
-
- @exceptions_tracker << e.class
-
- if @exceptions_tracker.reject { |i| i.nil? }.count >= @max_flush_exceptions
- @logger.error("JDBC - max_flush_exceptions has been reached")
- raise LogStash::ShutdownSignal.new
- end
- end
-
- def teardown
- buffer_flush(:final => true)
- @pool.close()
+ def close
+ @stopping.make_true
+ @pool.close
  super
  end

  private

- def setup_and_test_pool!
+ def setup_and_test_pool!
  # Setup pool
  @pool = Java::ComZaxxerHikari::HikariDataSource.new

@@ -180,121 +158,128 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base

  validate_connection_timeout = (@connection_timeout / 1000) / 2

+ if !@connection_test_query.nil? and @connection_test_query.length > 1
+ @pool.setConnectionTestQuery(@connection_test_query)
+ @pool.setConnectionInitSql(@connection_test_query)
+ end
+
+ return unless @connection_test
+
  # Test connection
- test_connection = @pool.getConnection()
+ test_connection = @pool.getConnection
  unless test_connection.isValid(validate_connection_timeout)
- @logger.error("JDBC - Connection is not valid. Please check connection string or that your JDBC endpoint is available.")
+ @logger.warn('JDBC - Connection is not reporting as validate. Either connection is invalid, or driver is not getting the appropriate response.')
  end
- test_connection.close()
+ test_connection.close
  end

  def load_jar_files!
  # Load jar from driver path
  unless @driver_jar_path.nil?
- raise Exception.new("JDBC - Could not find jar file at given path. Check config.") unless File.exists? @driver_jar_path
+ raise LogStash::ConfigurationError, 'JDBC - Could not find jar file at given path. Check config.' unless File.exist? @driver_jar_path
  require @driver_jar_path
  return
  end

  # Revert original behaviour of loading from vendor directory
  # if no path given
- if ENV['LOGSTASH_HOME']
- jarpath = File.join(ENV['LOGSTASH_HOME'], "/vendor/jar/jdbc/*.jar")
- else
- jarpath = File.join(File.dirname(__FILE__), "../../../vendor/jar/jdbc/*.jar")
- end
+ jarpath = if ENV['LOGSTASH_HOME']
+ File.join(ENV['LOGSTASH_HOME'], '/vendor/jar/jdbc/*.jar')
+ else
+ File.join(File.dirname(__FILE__), '../../../vendor/jar/jdbc/*.jar')
+ end

- @logger.debug("JDBC - jarpath", path: jarpath)
+ @logger.trace('JDBC - jarpath', path: jarpath)

  jars = Dir[jarpath]
- raise Exception.new("JDBC - No jars found in jarpath. Have you read the README?") if jars.empty?
+ raise LogStash::ConfigurationError, 'JDBC - No jars found. Have you read the README?' if jars.empty?

  jars.each do |jar|
- @logger.debug("JDBC - Loaded jar", :jar => jar)
+ @logger.trace('JDBC - Loaded jar', jar: jar)
  require jar
  end
  end

- def safe_flush(events, teardown=false)
+ def submit(events)
  connection = nil
  statement = nil
-
+ events_to_retry = []
+
  begin
- connection = @pool.getConnection()
+ connection = @pool.getConnection
  rescue => e
- log_jdbc_exception(e, true)
- raise
+ log_jdbc_exception(e, true, nil)
+ # If a connection is not available, then the server has gone away
+ # We're not counting that towards our retry count.
+ return events, false
  end

- begin
- statement = connection.prepareStatement(@statement[0])
-
- events.each do |event|
- next if event.cancelled?
- next if @statement.length < 2
- statement = add_statement_event_params(statement, event)
-
- statement.addBatch()
- end
-
- statement.executeBatch()
- statement.close()
- @exceptions_tracker << nil
- rescue => e
- if retry_exception?(e)
- raise
+ events.each do |event|
+ begin
+ statement = connection.prepareStatement(
+ (@unsafe_statement == true) ? event.sprintf(@statement[0]) : @statement[0]
+ )
+ statement = add_statement_event_params(statement, event) if @statement.length > 1
+ statement.execute
+ rescue => e
+ if retry_exception?(e, event.to_json())
+ events_to_retry.push(event)
+ end
+ ensure
+ statement.close unless statement.nil?
  end
- ensure
- statement.close() unless statement.nil?
- connection.close() unless connection.nil?
  end
+
+ connection.close unless connection.nil?
+
+ return events_to_retry, true
  end

- def unsafe_flush(events, teardown=false)
- connection = nil
- statement = nil
- begin
- connection = @pool.getConnection()
- rescue => e
- log_jdbc_exception(e, true)
- raise
- end
+ def retrying_submit(actions)
+ # Initially we submit the full list of actions
+ submit_actions = actions
+ count_as_attempt = true

- begin
- events.each do |event|
- next if event.cancelled?
+ attempts = 1

- statement = connection.prepareStatement(event.sprintf(@statement[0]))
- statement = add_statement_event_params(statement, event) if @statement.length > 1
+ sleep_interval = @retry_initial_interval
+ while @stopping.false? and (submit_actions and !submit_actions.empty?)
+ return if !submit_actions || submit_actions.empty? # If everything's a success we move along
+ # We retry whatever didn't succeed
+ submit_actions, count_as_attempt = submit(submit_actions)

- statement.execute()
+ # Everything was a success!
+ break if !submit_actions || submit_actions.empty?

- # cancel the event, since we may end up outputting the same event multiple times
- # if an exception happens later down the line
- event.cancel
- @exceptions_tracker << nil
- end
- rescue => e
- if retry_exception?(e)
- raise
+ if @max_flush_exceptions > 0 and count_as_attempt == true
+ attempts += 1
+
+ if attempts > @max_flush_exceptions
+ @logger.error("JDBC - max_flush_exceptions has been reached. #{submit_actions.length} events have been unable to be sent to SQL and are being dropped. See previously logged exceptions for details.")
+ break
+ end
  end
- ensure
- statement.close() unless statement.nil?
- connection.close() unless connection.nil?
+
+ # If we're retrying the action sleep for the recommended interval
+ # Double the interval for the next time through to achieve exponential backoff
+ Stud.stoppable_sleep(sleep_interval) { @stopping.true? }
+ sleep_interval = next_sleep_interval(sleep_interval)
  end
  end

  def add_statement_event_params(statement, event)
  @statement[1..-1].each_with_index do |i, idx|
- if i.is_a? String
- value = event[i]
+ if @enable_event_as_json_keyword == true and i.is_a? String and i == @event_as_json_keyword
+ value = event.to_json
+ elsif i.is_a? String
+ value = event.get(i)
  if value.nil? and i =~ /%\{/
  value = event.sprintf(i)
  end
  else
  value = i
  end
-
+
  case value
  when Time
  # See LogStash::Timestamp, below, for the why behind strftime.
@@ -309,11 +294,19 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  # strftime appears to be the most reliable across drivers.
  statement.setString(idx + 1, value.time.strftime(STRFTIME_FMT))
  when Fixnum, Integer
- statement.setInt(idx + 1, value)
+ if value > 2147483647 or value < -2147483648
+ statement.setLong(idx + 1, value)
+ else
+ statement.setInt(idx + 1, value)
+ end
+ when BigDecimal
+ statement.setBigDecimal(idx + 1, value.to_java)
  when Float
  statement.setFloat(idx + 1, value)
  when String
  statement.setString(idx + 1, value)
+ when Array, Hash
+ statement.setString(idx + 1, value.to_json)
  when true, false
  statement.setBoolean(idx + 1, value)
  else
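A worked check of the integer branch added above: values outside the signed 32-bit range are bound with `setLong`, everything else with `setInt`. The bounds below simply restate the literals used in the diff.

```ruby
INT_MAX = 2_147_483_647
INT_MIN = -2_147_483_648

[42, 2_147_483_647, 2_147_483_648, -3_000_000_000].each do |value|
  setter = (value > INT_MAX || value < INT_MIN) ? :setLong : :setInt
  puts "#{value} -> #{setter}"
end
# 42 -> setInt, 2147483647 -> setInt, 2147483648 -> setLong, -3000000000 -> setLong
```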
@@ -324,14 +317,23 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  statement
  end

+ def retry_exception?(exception, event)
+ retrying = (exception.respond_to? 'getSQLState' and (RETRYABLE_SQLSTATE_CLASSES.include?(exception.getSQLState.to_s[0,2]) or @retry_sql_states.include?(exception.getSQLState)))
+ log_jdbc_exception(exception, retrying, event)

- def log_jdbc_exception(exception, retrying)
+ retrying
+ end
+
+ def log_jdbc_exception(exception, retrying, event)
  current_exception = exception
- log_text = 'JDBC - Exception. ' + (retrying ? 'Retrying' : 'Not retrying') + '.'
+ log_text = 'JDBC - Exception. ' + (retrying ? 'Retrying' : 'Not retrying')
+
  log_method = (retrying ? 'warn' : 'error')

  loop do
- @logger.send(log_method, log_text, :exception => current_exception, :backtrace => current_exception.backtrace)
+ # TODO reformat event output so that it only shows the fields necessary.
+
+ @logger.send(log_method, log_text, :exception => current_exception, :statement => @statement[0], :event => event)

  if current_exception.respond_to? 'getNextException'
  current_exception = current_exception.getNextException()
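A sketch of how `retry_exception?` above classifies a failure: the first two characters of a JDBC SQLState are its class, and a failure is retried when that class appears in `RETRYABLE_SQLSTATE_CLASSES` (only the tail of that constant, '58', is visible in this diff) or when the exact code is listed in `retry_sql_states`. The class list and custom code below are assumed values for illustration only.

```ruby
retryable_classes = ['08', '58'] # assumed sample; see the frozen constant in jdbc.rb
custom_states     = ['60000']    # hypothetical @retry_sql_states entry

def retryable?(sqlstate, classes, custom)
  classes.include?(sqlstate[0, 2]) || custom.include?(sqlstate)
end

retryable?('58030', retryable_classes, custom_states) # => true, class '58' (system error)
retryable?('42000', retryable_classes, custom_states) # => false, logged and not retried
retryable?('60000', retryable_classes, custom_states) # => true, matched exactly
```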
@@ -343,10 +345,8 @@ class LogStash::Outputs::Jdbc < LogStash::Outputs::Base
  end
  end

- def retry_exception?(exception)
- retrying = (exception.respond_to? 'getSQLState' and RETRYABLE_SQLSTATE_CLASSES.include?(exception.getSQLState.to_s[0,2]))
- log_jdbc_exception(exception, retrying)
-
- retrying
+ def next_sleep_interval(current_interval)
+ doubled = current_interval * 2
+ doubled > @retry_max_interval ? @retry_max_interval : doubled
  end
  end # class LogStash::Outputs::jdbc
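Putting `retrying_submit` and `next_sleep_interval` together, the retry pacing at the default settings (`retry_initial_interval` 2, `retry_max_interval` 128) doubles after each failed attempt and is capped at the maximum. A minimal sketch of the resulting sleep sequence:

```ruby
retry_initial_interval = 2
retry_max_interval     = 128

interval = retry_initial_interval
8.times do
  print "#{interval}s "
  interval = [interval * 2, retry_max_interval].min
end
# => 2s 4s 8s 16s 32s 64s 128s 128s
```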