logstash-core 2.1.0.snapshot2-java → 2.1.0.snapshot3-java

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of logstash-core might be problematic. Click here for more details.

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: df26ae829d2eb22f63fa59c3b1ef2c12e6f6cb08
4
- data.tar.gz: fb085312c7eb9eab720792dfae8d37940ea3c56a
3
+ metadata.gz: e2e8b28701312405fff7a8a55eb13c4e9ebf49a0
4
+ data.tar.gz: 0f454692f8c2dced12e11fa2932c363e770850f4
5
5
  SHA512:
6
- metadata.gz: d70144723adacaf709e415ff9b68f42ad0d0a623d1268e78a04cc1e5bc24be747575297ed6ebeeb94e70c8cc7ae199b6cc18aacd7ceb1ea29ec1e5dd57db6fa1
7
- data.tar.gz: 3db5d330c35e1cd0a7b29b29f615ec2753acbee2bfb933f7c40a037d7377bfb05659133567ea320158ae31a6762be3b94e5a9e83bff7624d93fa5a5d00ccb004
6
+ metadata.gz: 875d29d85f12bb01346a1330b00ff67811d78eff3238a70054f329d97cc429a1a168798e19c69042a6cf1b822f193d7f1ae97181f58e75f40aeffcfbd24c6ce6
7
+ data.tar.gz: fc3bca1eb07e1f7cbc711e0ab3581bf8b001eb5d5550d9a454895626bc3342bcd0d8a5f03a1bbaf34dee9a26e876ad7f6e25b7aac6f0965cc6bdeec0860c075a
@@ -50,6 +50,11 @@ class LogStash::Agent < Clamp::Command
50
50
  I18n.t("logstash.agent.flag.configtest"),
51
51
  :attribute_name => :config_test
52
52
 
53
+ option "--[no-]allow-unsafe-shutdown", :flag,
54
+ I18n.t("logstash.agent.flag.unsafe_shutdown"),
55
+ :attribute_name => :unsafe_shutdown,
56
+ :default => false
57
+
53
58
  # Emit a warning message.
54
59
  def warn(message)
55
60
  # For now, all warnings are fatal.
@@ -75,6 +80,9 @@ class LogStash::Agent < Clamp::Command
75
80
  require "logstash/plugin"
76
81
  @logger = Cabin::Channel.get(LogStash)
77
82
 
83
+ LogStash::ShutdownController.unsafe_shutdown = unsafe_shutdown?
84
+ LogStash::ShutdownController.logger = @logger
85
+
78
86
  if version?
79
87
  show_version
80
88
  return 0
@@ -176,8 +184,7 @@ class LogStash::Agent < Clamp::Command
176
184
 
177
185
  def shutdown(pipeline)
178
186
  pipeline.shutdown do
179
- InflightEventsReporter.logger = @logger
180
- InflightEventsReporter.start(pipeline.input_to_filter, pipeline.filter_to_output, pipeline.outputs)
187
+ ::LogStash::ShutdownController.start(pipeline)
181
188
  end
182
189
  end
183
190
 
@@ -143,6 +143,7 @@ class LogStash::Filters::Base < LogStash::Plugin
143
143
  # @return [Array<LogStash::Event] filtered events and any new events generated by the filter
144
144
  public
145
145
  def multi_filter(events)
146
+ LogStash::Util.set_thread_plugin(self)
146
147
  result = []
147
148
  events.each do |event|
148
149
  unless event.cancelled?
@@ -23,7 +23,7 @@ class LogStash::Outputs::Base < LogStash::Plugin
23
23
  # Note that this setting may not be useful for all outputs.
24
24
  config :workers, :validate => :number, :default => 1
25
25
 
26
- attr_reader :worker_plugins, :worker_queue
26
+ attr_reader :worker_plugins, :worker_queue, :worker_threads
27
27
 
28
28
  public
29
29
  def workers_not_supported(message=nil)
@@ -56,13 +56,15 @@ class LogStash::Outputs::Base < LogStash::Plugin
56
56
  def worker_setup
57
57
  if @workers == 1
58
58
  @worker_plugins = [self]
59
+ @worker_threads = []
59
60
  else
60
61
  define_singleton_method(:handle, method(:handle_worker))
61
62
  @worker_queue = SizedQueue.new(20)
62
63
  @worker_plugins = @workers.times.map { self.class.new(@original_params.merge("workers" => 1)) }
63
- @worker_plugins.map.with_index do |plugin, i|
64
+ @worker_threads = @worker_plugins.map.with_index do |plugin, i|
64
65
  Thread.new(original_params, @worker_queue) do |params, queue|
65
- LogStash::Util::set_thread_name(">#{self.class.config_name}.#{i}")
66
+ LogStash::Util.set_thread_name(">#{self.class.config_name}.#{i}")
67
+ LogStash::Util.set_thread_plugin(self)
66
68
  plugin.register
67
69
  while true
68
70
  event = queue.pop
@@ -75,10 +77,12 @@ class LogStash::Outputs::Base < LogStash::Plugin
75
77
 
76
78
  public
77
79
  def handle(event)
80
+ LogStash::Util.set_thread_plugin(self)
78
81
  receive(event)
79
82
  end # def handle
80
83
 
81
84
  def handle_worker(event)
85
+ LogStash::Util.set_thread_plugin(self)
82
86
  @worker_queue.push(event)
83
87
  end
84
88
 
@@ -9,11 +9,11 @@ require "logstash/config/file"
9
9
  require "logstash/filters/base"
10
10
  require "logstash/inputs/base"
11
11
  require "logstash/outputs/base"
12
- require "logstash/util/reporter"
13
12
  require "logstash/config/cpu_core_strategy"
14
13
  require "logstash/util/defaults_printer"
14
+ require "logstash/shutdown_controller"
15
15
 
16
- class LogStash::Pipeline
16
+ module LogStash; class Pipeline
17
17
  attr_reader :inputs, :filters, :outputs, :input_to_filter, :filter_to_output
18
18
 
19
19
  def initialize(configstr)
@@ -25,6 +25,7 @@ class LogStash::Pipeline
25
25
 
26
26
  grammar = LogStashConfigParser.new
27
27
  @config = grammar.parse(configstr)
28
+
28
29
  if @config.nil?
29
30
  raise LogStash::ConfigurationError, grammar.failure_reason
30
31
  end
@@ -170,8 +171,11 @@ class LogStash::Pipeline
170
171
  # dynamically get thread count based on filter threadsafety
171
172
  # moved this test to here to allow for future config reloading
172
173
  to_start = safe_filter_worker_count
173
- @filter_threads = to_start.times.collect do
174
- Thread.new { filterworker }
174
+ @filter_threads = to_start.times.collect do |i|
175
+ Thread.new do
176
+ LogStash::Util.set_thread_name("|filterworker.#{i}")
177
+ filterworker
178
+ end
175
179
  end
176
180
  actually_started = @filter_threads.select(&:alive?).size
177
181
  msg = "Worker threads expected: #{to_start}, worker threads started: #{actually_started}"
@@ -195,7 +199,8 @@ class LogStash::Pipeline
195
199
  end
196
200
 
197
201
  def inputworker(plugin)
198
- LogStash::Util::set_thread_name("<#{plugin.class.config_name}")
202
+ LogStash::Util.set_thread_name("<#{plugin.class.config_name}")
203
+ LogStash::Util.set_thread_plugin(plugin)
199
204
  begin
200
205
  plugin.run(@input_to_filter)
201
206
  rescue => e
@@ -228,7 +233,6 @@ class LogStash::Pipeline
228
233
  end # def inputworker
229
234
 
230
235
  def filterworker
231
- LogStash::Util.set_thread_name("|worker")
232
236
  begin
233
237
  while true
234
238
  event = @input_to_filter.pop
@@ -270,6 +274,7 @@ class LogStash::Pipeline
270
274
  event = @filter_to_output.pop
271
275
  break if event == LogStash::SHUTDOWN
272
276
  output_func(event)
277
+ LogStash::Util.set_thread_plugin(nil)
273
278
  end
274
279
  ensure
275
280
  @outputs.each do |output|
@@ -329,4 +334,49 @@ class LogStash::Pipeline
329
334
  end
330
335
  end # flush_filters_to_output!
331
336
 
332
- end # class Pipeline
337
+ def inflight_count
338
+ data = {}
339
+ total = 0
340
+
341
+ input_to_filter = @input_to_filter.size
342
+ total += input_to_filter
343
+ filter_to_output = @filter_to_output.size
344
+ total += filter_to_output
345
+
346
+ data["input_to_filter"] = input_to_filter if input_to_filter > 0
347
+ data["filter_to_output"] = filter_to_output if filter_to_output > 0
348
+
349
+ output_worker_queues = []
350
+ @outputs.each do |output|
351
+ next unless output.worker_queue && output.worker_queue.size > 0
352
+ plugin_info = output.debug_info
353
+ size = output.worker_queue.size
354
+ total += size
355
+ plugin_info << size
356
+ output_worker_queues << plugin_info
357
+ end
358
+ data["output_worker_queues"] = output_worker_queues unless output_worker_queues.empty?
359
+ data["total"] = total
360
+ data
361
+ end
362
+
363
+ def stalling_threads
364
+ plugin_threads
365
 + .reject {|t| t["blocked_on"] } # known benign blocking statuses
366
+ .each {|t| t.delete("backtrace") }
367
+ .each {|t| t.delete("blocked_on") }
368
+ .each {|t| t.delete("status") }
369
+ end
370
+
371
+ def plugin_threads
372
+ input_threads = @input_threads.select {|t| t.alive? }.map {|t| thread_info(t) }
373
+ filter_threads = @filter_threads.select {|t| t.alive? }.map {|t| thread_info(t) }
374
+ output_threads = @output_threads.select {|t| t.alive? }.map {|t| thread_info(t) }
375
+ output_worker_threads = @outputs.flat_map {|output| output.worker_threads }.map {|t| thread_info(t) }
376
+ input_threads + filter_threads + output_threads + output_worker_threads
377
+ end
378
+
379
+ def thread_info(thread)
380
+ LogStash::Util.thread_info(thread)
381
+ end
382
+ end; end
@@ -59,6 +59,11 @@ class LogStash::Plugin
59
59
  end
60
60
  end
61
61
 
62
+ public
63
+ def debug_info
64
+ [self.class.to_s, original_params]
65
+ end
66
+
62
67
  # Look up a plugin by type and name.
63
68
  public
64
69
  def self.lookup(type, name)
@@ -0,0 +1,127 @@
1
+ # encoding: utf-8
2
+
3
+ module LogStash
4
+ class ShutdownController
5
+
6
+ CHECK_EVERY = 1 # second
7
+ REPORT_EVERY = 5 # checks
8
+ ABORT_AFTER = 3 # stalled reports
9
+
10
+ attr_reader :cycle_period, :report_every, :abort_threshold
11
+
12
+ def initialize(pipeline, cycle_period=CHECK_EVERY, report_every=REPORT_EVERY, abort_threshold=ABORT_AFTER)
13
+ @pipeline = pipeline
14
+ @cycle_period = cycle_period
15
+ @report_every = report_every
16
+ @abort_threshold = abort_threshold
17
+ @reports = []
18
+ end
19
+
20
+ def self.unsafe_shutdown=(boolean)
21
+ @unsafe_shutdown = boolean
22
+ end
23
+
24
+ def self.unsafe_shutdown?
25
+ @unsafe_shutdown
26
+ end
27
+
28
+ def self.logger=(logger)
29
+ @logger = logger
30
+ end
31
+
32
+ def self.logger
33
+ @logger ||= Cabin::Channel.get(LogStash)
34
+ end
35
+
36
+ def self.start(pipeline, cycle_period=CHECK_EVERY, report_every=REPORT_EVERY, abort_threshold=ABORT_AFTER)
37
+ controller = self.new(pipeline, cycle_period, report_every, abort_threshold)
38
+ Thread.new(controller) { |controller| controller.start }
39
+ end
40
+
41
+ def logger
42
+ self.class.logger
43
+ end
44
+
45
+ def start
46
+ sleep(@cycle_period)
47
+ cycle_number = 0
48
+ stalled_count = 0
49
+ Stud.interval(@cycle_period) do
50
+ @reports << Report.from_pipeline(@pipeline)
51
+ @reports.delete_at(0) if @reports.size > @report_every # expire old report
52
+ if cycle_number == (@report_every - 1) # it's report time!
53
+ logger.warn(@reports.last.to_hash)
54
+
55
+ if shutdown_stalled?
56
+ logger.error("The shutdown process appears to be stalled due to busy or blocked plugins. Check the logs for more information.") if stalled_count == 0
57
+ stalled_count += 1
58
+
59
+ if self.class.unsafe_shutdown? && @abort_threshold == stalled_count
60
+ logger.fatal("Forcefully quitting logstash..")
61
+ force_exit()
62
+ break
63
+ end
64
+ else
65
+ stalled_count = 0
66
+ end
67
+ end
68
+ cycle_number = (cycle_number + 1) % @report_every
69
+ end
70
+ end
71
+
72
+ # A pipeline shutdown is stalled if
73
+ # * at least REPORT_EVERY reports have been created
74
 + # * the inflight event count is monotonically increasing
75
+ # * there are worker threads running which aren't blocked on SizedQueue pop/push
76
+ # * the stalled thread list is constant in the previous REPORT_EVERY reports
77
+ def shutdown_stalled?
78
+ return false unless @reports.size == @report_every #
79
+ # is stalled if inflight count is either constant or increasing
80
+ stalled_event_count = @reports.each_cons(2).all? do |prev_report, next_report|
81
+ prev_report.inflight_count["total"] <= next_report.inflight_count["total"]
82
+ end
83
+ if stalled_event_count
84
+ @reports.each_cons(2).all? do |prev_report, next_report|
85
+ prev_report.stalling_threads == next_report.stalling_threads
86
+ end
87
+ else
88
+ false
89
+ end
90
+ end
91
+
92
+ def force_exit
93
+ exit(-1)
94
+ end
95
+ end
96
+
97
+ class Report
98
+
99
+ attr_reader :inflight_count, :stalling_threads
100
+
101
+ def self.from_pipeline(pipeline)
102
+ new(pipeline.inflight_count, pipeline.stalling_threads)
103
+ end
104
+
105
+ def initialize(inflight_count, stalling_threads)
106
+ @inflight_count = inflight_count
107
+ @stalling_threads = format_threads_by_plugin(stalling_threads)
108
+ end
109
+
110
+ def to_hash
111
+ {
112
+ "INFLIGHT_EVENT_COUNT" => @inflight_count,
113
+ "STALLING_THREADS" => @stalling_threads
114
+ }
115
+ end
116
+
117
+ def format_threads_by_plugin(stalling_threads)
118
+ stalled_plugins = {}
119
+ stalling_threads.each do |thr|
120
+ key = (thr.delete("plugin") || "other")
121
+ stalled_plugins[key] ||= []
122
+ stalled_plugins[key] << thr
123
+ end
124
+ stalled_plugins
125
+ end
126
+ end
127
+ end
@@ -24,6 +24,41 @@ module LogStash::Util
24
24
  end
25
25
  end # def set_thread_name
26
26
 
27
+ def self.set_thread_plugin(plugin)
28
+ Thread.current[:plugin] = plugin
29
+ end
30
+
31
+ def self.get_thread_id(thread)
32
+ if RUBY_ENGINE == "jruby"
33
+ JRuby.reference(thread).native_thread.id
34
+ else
35
+ raise Exception.new("Native thread IDs aren't supported outside of JRuby")
36
+ end
37
+ end
38
+
39
+ def self.thread_info(thread)
40
+ backtrace = thread.backtrace.map do |line|
41
+ line.gsub(LogStash::Environment::LOGSTASH_HOME, "[...]")
42
+ end
43
+
44
+ blocked_on = case backtrace.first
45
+ when /in `push'/ then "blocked_on_push"
46
+ when /(?:pipeline|base).*pop/ then "waiting_for_events"
47
+ else nil
48
+ end
49
+
50
+ {
51
+ "thread_id" => get_thread_id(thread),
52
+ "name" => thread[:name],
53
+ "plugin" => (thread[:plugin] ? thread[:plugin].debug_info : nil),
54
+ "backtrace" => backtrace,
55
+ "blocked_on" => blocked_on,
56
+ "status" => thread.status,
57
+ "current_call" => backtrace.first
58
+ }
59
+ end
60
+
61
+
27
62
  # Merge hash 'src' into 'dst' nondestructively
28
63
  #
29
64
  # Duplicate keys will become array values
@@ -1,6 +1,6 @@
1
1
  # encoding: utf-8
2
2
  # The version of logstash.
3
- LOGSTASH_VERSION = "2.1.0.snapshot2"
3
+ LOGSTASH_VERSION = "2.1.0.snapshot3"
4
4
 
5
5
  # Note to authors: this should not include dashes because 'gem' barfs if
6
6
  # you include a dash in the version string.
@@ -187,3 +187,8 @@ en:
187
187
  debug: |+
188
188
  Most verbose logging. This causes 'debug'
189
189
  level logs to be emitted.
190
+ unsafe_shutdown: |+
191
+ Force logstash to exit during shutdown even
192
+ if there are still inflight events in memory.
193
+ By default, logstash will refuse to quit until all
194
+ received events have been pushed to the outputs.
@@ -504,6 +504,7 @@ describe LogStash::Event do
504
504
  let(:event2) { LogStash::Event.new({ "host" => "bar", "message" => "foo"}) }
505
505
 
506
506
  it "should cache only one template" do
507
+ LogStash::StringInterpolation::CACHE.clear
507
508
  expect {
508
509
  event1.to_s
509
510
  event2.to_s
@@ -0,0 +1,107 @@
1
+ # encoding: utf-8
2
+ require "spec_helper"
3
+ require "logstash/shutdown_controller"
4
+
5
+ describe LogStash::ShutdownController do
6
+
7
+ let(:check_every) { 0.01 }
8
+ let(:check_threshold) { 100 }
9
+ subject { LogStash::ShutdownController.new(pipeline, check_every) }
10
+ let(:pipeline) { double("pipeline") }
11
+ report_count = 0
12
+
13
+ before :each do
14
+ allow(LogStash::Report).to receive(:from_pipeline).and_wrap_original do |m, *args|
15
+ report_count += 1
16
+ m.call(*args)
17
+ end
18
+ end
19
+
20
+ after :each do
21
+ report_count = 0
22
+ end
23
+
24
+ context "when pipeline is stalled" do
25
+ let(:increasing_count) { (1..5000).to_a.map {|i| { "total" => i } } }
26
+ before :each do
27
+ allow(pipeline).to receive(:inflight_count).and_return(*increasing_count)
28
+ allow(pipeline).to receive(:stalling_threads) { { } }
29
+ end
30
+
31
+ describe ".unsafe_shutdown = true" do
32
+ let(:abort_threshold) { subject.abort_threshold }
33
+ let(:report_every) { subject.report_every }
34
+
35
+ before :each do
36
+ subject.class.unsafe_shutdown = true
37
+ end
38
+
39
+ it "should force the shutdown" do
40
+ expect(subject).to receive(:force_exit).once
41
+ subject.start
42
+ end
43
+
44
+ it "should do exactly \"abort_threshold\" stall checks" do
45
+ allow(subject).to receive(:force_exit)
46
+ expect(subject).to receive(:shutdown_stalled?).exactly(abort_threshold).times.and_call_original
47
+ subject.start
48
+ end
49
+
50
+ it "should do exactly \"abort_threshold\"*\"report_every\" stall checks" do
51
+ allow(subject).to receive(:force_exit)
52
+ expect(LogStash::Report).to receive(:from_pipeline).exactly(abort_threshold*report_every).times.and_call_original
53
+ subject.start
54
+ end
55
+ end
56
+
57
+ describe ".unsafe_shutdown = false" do
58
+
59
+ before :each do
60
+ subject.class.unsafe_shutdown = false
61
+ end
62
+
63
+ it "shouldn't force the shutdown" do
64
+ expect(subject).to_not receive(:force_exit)
65
+ thread = Thread.new(subject) {|subject| subject.start }
66
+ sleep 0.1 until report_count > check_threshold
67
+ thread.kill
68
+ end
69
+ end
70
+ end
71
+
72
+ context "when pipeline is not stalled" do
73
+ let(:decreasing_count) { (1..5000).to_a.reverse.map {|i| { "total" => i } } }
74
+ before :each do
75
+ allow(pipeline).to receive(:inflight_count).and_return(*decreasing_count)
76
+ allow(pipeline).to receive(:stalling_threads) { { } }
77
+ end
78
+
79
+ describe ".unsafe_shutdown = true" do
80
+
81
+ before :each do
82
+ subject.class.unsafe_shutdown = true
83
+ end
84
+
85
 + it "shouldn't force the shutdown" do
86
+ expect(subject).to_not receive(:force_exit)
87
+ thread = Thread.new(subject) {|subject| subject.start }
88
+ sleep 0.1 until report_count > check_threshold
89
+ thread.kill
90
+ end
91
+ end
92
+
93
+ describe ".unsafe_shutdown = false" do
94
+
95
+ before :each do
96
+ subject.class.unsafe_shutdown = false
97
+ end
98
+
99
+ it "shouldn't force the shutdown" do
100
+ expect(subject).to_not receive(:force_exit)
101
+ thread = Thread.new(subject) {|subject| subject.start }
102
+ sleep 0.1 until report_count > check_threshold
103
+ thread.kill
104
+ end
105
+ end
106
+ end
107
+ end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-core
3
3
  version: !ruby/object:Gem::Version
4
- version: 2.1.0.snapshot2
4
+ version: 2.1.0.snapshot3
5
5
  platform: java
6
6
  authors:
7
7
  - Jordan Sissel
@@ -10,7 +10,7 @@ authors:
10
10
  autorequire:
11
11
  bindir: bin
12
12
  cert_chain: []
13
- date: 2015-11-18 00:00:00.000000000 Z
13
+ date: 2015-11-19 00:00:00.000000000 Z
14
14
  dependencies:
15
15
  - !ruby/object:Gem::Dependency
16
16
  requirement: !ruby/object:Gem::Requirement
@@ -249,6 +249,7 @@ files:
249
249
  - lib/logstash/plugin.rb
250
250
  - lib/logstash/program.rb
251
251
  - lib/logstash/runner.rb
252
+ - lib/logstash/shutdown_controller.rb
252
253
  - lib/logstash/sized_queue.rb
253
254
  - lib/logstash/string_interpolation.rb
254
255
  - lib/logstash/timestamp.rb
@@ -263,7 +264,6 @@ files:
263
264
  - lib/logstash/util/password.rb
264
265
  - lib/logstash/util/plugin_version.rb
265
266
  - lib/logstash/util/prctl.rb
266
- - lib/logstash/util/reporter.rb
267
267
  - lib/logstash/util/retryable.rb
268
268
  - lib/logstash/util/socket_peer.rb
269
269
  - lib/logstash/util/unicode_trimmer.rb
@@ -281,6 +281,7 @@ files:
281
281
  - spec/core/pipeline_spec.rb
282
282
  - spec/core/plugin_spec.rb
283
283
  - spec/core/runner_spec.rb
284
+ - spec/core/shutdown_controller_spec.rb
284
285
  - spec/core/timestamp_spec.rb
285
286
  - spec/coverage_helper.rb
286
287
  - spec/filters/base_spec.rb
@@ -343,6 +344,7 @@ test_files:
343
344
  - spec/core/pipeline_spec.rb
344
345
  - spec/core/plugin_spec.rb
345
346
  - spec/core/runner_spec.rb
347
+ - spec/core/shutdown_controller_spec.rb
346
348
  - spec/core/timestamp_spec.rb
347
349
  - spec/coverage_helper.rb
348
350
  - spec/filters/base_spec.rb
@@ -1,28 +0,0 @@
1
- # encoding: utf-8
2
- class InflightEventsReporter
3
- def self.logger=(logger)
4
- @logger = logger
5
- end
6
-
7
- def self.start(input_to_filter, filter_to_output, outputs)
8
- Thread.new do
9
- loop do
10
- sleep 5
11
- report(input_to_filter, filter_to_output, outputs)
12
- end
13
- end
14
- end
15
-
16
- def self.report(input_to_filter, filter_to_output, outputs)
17
- report = {
18
- "input_to_filter" => input_to_filter.size,
19
- "filter_to_output" => filter_to_output.size,
20
- "outputs" => []
21
- }
22
- outputs.each do |output|
23
- next unless output.worker_queue && output.worker_queue.size > 0
24
- report["outputs"] << [output.inspect, output.worker_queue.size]
25
- end
26
- @logger.warn ["INFLIGHT_EVENTS_REPORT", Time.now.iso8601, report]
27
- end
28
- end