hshek-logstash-output-sumologic 0.0.2
- checksums.yaml +7 -0
- data/CHANGELOG.md +34 -0
- data/DEVELOPER.md +39 -0
- data/Gemfile +4 -0
- data/LICENSE +196 -0
- data/README.md +158 -0
- data/lib/logstash/outputs/sumologic.rb +158 -0
- data/lib/logstash/outputs/sumologic/batch.rb +13 -0
- data/lib/logstash/outputs/sumologic/common.rb +73 -0
- data/lib/logstash/outputs/sumologic/compressor.rb +39 -0
- data/lib/logstash/outputs/sumologic/header_builder.rb +52 -0
- data/lib/logstash/outputs/sumologic/message_queue.rb +57 -0
- data/lib/logstash/outputs/sumologic/monitor.rb +76 -0
- data/lib/logstash/outputs/sumologic/payload_builder.rb +159 -0
- data/lib/logstash/outputs/sumologic/piler.rb +89 -0
- data/lib/logstash/outputs/sumologic/sender.rb +172 -0
- data/lib/logstash/outputs/sumologic/statistics.rb +100 -0
- data/logstash-output-sumologic.gemspec +27 -0
- data/spec/outputs/sumologic/compressor_spec.rb +27 -0
- data/spec/outputs/sumologic/header_builder_spec.rb +244 -0
- data/spec/outputs/sumologic/message_queue_spec.rb +50 -0
- data/spec/outputs/sumologic/payload_builder_spec.rb +522 -0
- data/spec/outputs/sumologic/piler_spec.rb +154 -0
- data/spec/outputs/sumologic/sender_spec.rb +188 -0
- data/spec/outputs/sumologic_spec.rb +240 -0
- data/spec/test_server.rb +49 -0
- metadata +161 -0
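
The three spec files reconstructed below (piler, sender, and the end-to-end plugin spec) drive the output through its normal lifecycle. As a quick orientation, here is a minimal usage sketch distilled from those specs; it is not part of the gem, it assumes a Logstash/JRuby environment where logstash-core is available, the localhost URL is a placeholder for a Sumo Logic HTTP source endpoint, and the settings shown are the same ones the specs use:

  require "logstash/outputs/sumologic"

  # Instantiate the output. "sender_max" and "format" are optional tuning
  # settings copied from the specs; the URL is a placeholder endpoint.
  plugin = LogStash::Outputs::SumoLogic.new(
    "url"        => "http://localhost:8080",
    "sender_max" => 1,
    "format"     => "%{@json}")

  plugin.register    # the specs always register before handing events to the plugin
  plugin.receive(LogStash::Event.new("host" => "myHost", "message" => "Hello world"))
  plugin.multi_receive([LogStash::Event.new("host" => "myHost", "message" => "Hello again")])
  plugin.close       # close() drains any queued messages (see the close() spec below)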
data/spec/outputs/sumologic/piler_spec.rb
@@ -0,0 +1,154 @@
# encoding: utf-8
require "logstash/devutils/rspec/spec_helper"
require "logstash/outputs/sumologic"
include LogStash::Outputs

describe SumoLogic::Piler do

  event = LogStash::Event.new("foo" => "bar", "message" => "This is a log line")
  event_10 = LogStash::Event.new("foo" => "bar", "message" => "1234567890")

  before :each do
    piler.start()
  end

  after :each do
    queue.drain()
    piler.stop()
  end

  context "working in pile mode if interval > 0 && pile_max > 0" do
    let(:config) { {"queue_max" => 10, "interval" => 10, "pile_max" => 100 } }
    let(:stats) { SumoLogic::Statistics.new }
    let(:queue) { SumoLogic::MessageQueue.new(stats, config) }
    let(:piler) { SumoLogic::Piler.new(queue, stats, config) }
    specify {
      expect(piler.is_pile).to be true
    }
  end # context

  context "working in non-pile mode if interval <= 0" do
    let(:config) { {"queue_max" => 10, "interval" => 0, "pile_max" => 100 } }
    let(:stats) { SumoLogic::Statistics.new }
    let(:queue) { SumoLogic::MessageQueue.new(stats, config) }
    let(:piler) { SumoLogic::Piler.new(queue, stats, config) }
    specify {
      expect(piler.is_pile).to be false
    }
  end # context

  context "working in non-pile mode if pile_max <= 0" do
    let(:config) { {"queue_max" => 10, "interval" => 10, "pile_max" => 0 } }
    let(:stats) { SumoLogic::Statistics.new }
    let(:queue) { SumoLogic::MessageQueue.new(stats, config) }
    let(:piler) { SumoLogic::Piler.new(queue, stats, config) }
    specify {
      expect(piler.is_pile).to be false
    }
  end # context

  context "in non-pile mode" do
    let(:config) { {"queue_max" => 10, "interval" => 0, "pile_max" => 100, "format" => "%{message}" } }
    let(:stats) { SumoLogic::Statistics.new }
    let(:queue) { SumoLogic::MessageQueue.new(stats, config) }
    let(:piler) { SumoLogic::Piler.new(queue, stats, config) }

    it "enqueues immediately after input" do
      expect(stats.total_enque_times.value).to be 0
      expect(queue.size).to be 0
      piler.input(event)
      expect(stats.total_enque_times.value).to be 1
      expect(stats.total_enque_bytes.value).to be 18
      expect(queue.size).to be 1
      expect(queue.bytesize).to be 18
    end

    it "dequeues correctly" do
      piler.input(event)
      expect(queue.deq().payload).to eq "This is a log line"
      expect(queue.size).to be 0
      expect(queue.bytesize).to be 0
      expect(stats.total_deque_times.value).to be 1
      expect(stats.total_deque_bytes.value).to be 18
    end

  end # context

  context "in pile mode" do

    let(:config) { {"queue_max" => 10, "interval" => 5, "pile_max" => 25, "format" => "%{message}" } }
    let(:stats) { SumoLogic::Statistics.new }
    let(:queue) { SumoLogic::MessageQueue.new(stats, config) }
    let(:piler) { SumoLogic::Piler.new(queue, stats, config) }

    it "enqueues content from the pile when pile_max is reached" do
      expect(queue.size).to be 0
      piler.input(event_10)
      expect(queue.size).to be 0
      piler.input(event_10)
      expect(queue.size).to be 0
      piler.input(event_10)
      expect(queue.size).to be 1
    end

    it "enqueues content from the pile when the interval is reached" do
      expect(queue.size).to be 0
      piler.input(event_10)
      expect(queue.size).to be 0
      piler.input(event_10)
      sleep(10)
      expect(queue.size).to be 1
    end

  end # context

  context "pile to message queue" do

    let(:config) { {"queue_max" => 5, "interval" => 3, "pile_max" => 5, "format" => "%{message}"} }
    let(:stats) { SumoLogic::Statistics.new }
    let(:queue) { SumoLogic::MessageQueue.new(stats, config) }
    let(:piler) { SumoLogic::Piler.new(queue, stats, config) }

    it "blocks the input thread if the queue is full" do
      input_t = Thread.new {
        for i in 0..10 do
          piler.input(event_10)
        end
      }
      sleep(3)
      expect(queue.size).to be 5
      expect(queue.bytesize).to be 50
      piler.stop()
      queue.drain()
      input_t.kill()
    end

    it "resumes the input thread if the queue is drained" do
      input_t = Thread.new {
        for i in 0..10 do
          piler.input(event_10)
        end
      }
      sleep(5)
      expect(stats.total_deque_times.value).to be 0
      expect(queue.size).to be 5
      expect(stats.total_enque_times.value).to be 5
      queue.deq()
      sleep(3)
      expect(stats.total_deque_times.value).to be 1
      expect(queue.size).to be 5
      expect(stats.total_enque_times.value).to be 6
      queue.deq()
      sleep(3)
      expect(stats.total_deque_times.value).to be 2
      expect(queue.size).to be 5
      expect(stats.total_enque_times.value).to be 7
      piler.stop()
      queue.drain()
      input_t.kill()
    end

  end # context

end # describe
data/spec/outputs/sumologic/sender_spec.rb
@@ -0,0 +1,188 @@
# encoding: utf-8
require "logstash/devutils/rspec/spec_helper"
require "rspec/eventually"
require "logstash/outputs/sumologic"

require_relative "../../test_server.rb"

describe LogStash::Outputs::SumoLogic::Sender do

  before :all do
    @@server = TestServer.new()
    @@server.start()
  end

  before :each do
    @@server.response = TestServer::RESPONSE_200
    @@server.drain()
  end

  after :all do
    @@server.stop()
  end

  context "connect()" do
    let(:stats) { LogStash::Outputs::SumoLogic::Statistics.new() }
    let(:queue) { LogStash::Outputs::SumoLogic::MessageQueue.new(stats, "queue_max" => 10) }
    let(:sender) { LogStash::Outputs::SumoLogic::Sender.new(false, queue, stats, "url" => "http://localhost:#{TestServer::PORT}") }

    it "should return true if the server responds 200" do
      expect(sender.connect()).to be true
      result = @@server.drain()
      expect(result.size).to eq(1)
    end

    it "should return false if the server responds 429" do
      @@server.response = TestServer::RESPONSE_429
      expect(sender.connect()).to be false
      result = @@server.drain()
      expect(result.size).to eq(1)
    end

    it "should return false if the server cannot be reached" do
      sender = LogStash::Outputs::SumoLogic::Sender.new(false, queue, stats, "url" => "http://localhost:#{TestServer::PORT + 1}")
      expect(sender.connect()).to be false
      result = @@server.drain()
      expect(result.size).to eq(0)
    end

  end # context

  context "single sender" do
    let(:plugin) { LogStash::Outputs::SumoLogic.new("url" => "http://localhost:#{TestServer::PORT}", "sender_max" => 1, "sleep_before_requeue" => 1, "request_timeout" => 3) }
    let(:event) { LogStash::Event.new("host" => "myHost", "message" => "Hello world") }

    it "should send message correctly" do
      plugin.register
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(1)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(1)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(2)
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(2)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(2)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(1)
    end

    it "should re-enqueue the message if it got 429" do
      plugin.register
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(1)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(1)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(2)
      @@server.response = TestServer::RESPONSE_429
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(be > 1).within(10).pause_for(1)
      expect { plugin.stats.total_response("429") }.to eventually(be > 1).within(10).pause_for(1)
      result = @@server.drain()
      expect(result.size).to be > 0
      @@server.response = TestServer::RESPONSE_200
      expect { plugin.stats.total_response("200") }.to eventually(be > 1).within(10).pause_for(1)
      result = @@server.drain()
      expect(result.size).to be > 0
    end

    it "should re-enqueue the message if the network failed" do
      plugin.register
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(1)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(1)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(2)
      @@server.stop()
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(be > 1).within(10).pause_for(1)
      expect { plugin.stats.total_response("failure") }.to eventually(be > 1).within(25).pause_for(1)
      result = @@server.drain()
      @@server.start()
      @@server.response = TestServer::RESPONSE_200
      expect { plugin.stats.total_response("200") }.to eventually(be > 1).within(10).pause_for(1)
      result = @@server.drain()
      expect(result.size).to be > 0
    end

  end # context

  context "multiple senders" do
    let(:plugin) { LogStash::Outputs::SumoLogic.new("url" => "http://localhost:#{TestServer::PORT}", "sender_max" => 10, "sleep_before_requeue" => 1, "request_timeout" => 3) }
    let(:event) { LogStash::Event.new("host" => "myHost", "message" => "Hello world") }

    it "should send message correctly" do
      plugin.register
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(1)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(1)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(2)
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(2)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(2)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(1)
    end

    it "should re-enqueue the message if it got 429" do
      plugin.register
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(1)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(1)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(2)
      @@server.response = TestServer::RESPONSE_429
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(be > 1).within(10).pause_for(1)
      expect { plugin.stats.total_response("429") }.to eventually(be > 1).within(10).pause_for(1)
      result = @@server.drain()
      expect(result.size).to be > 0
      @@server.response = TestServer::RESPONSE_200
      expect { plugin.stats.total_response("200") }.to eventually(be > 1).within(10).pause_for(1)
      result = @@server.drain()
      expect(result.size).to be > 0
    end

    it "should re-enqueue the message if the network failed" do
      plugin.register
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(eq(1)).pause_for(1)
      expect { plugin.stats.total_response("200") }.to eventually(eq(1)).pause_for(1)
      result = @@server.drain()
      expect(result.size).to eq(2)
      @@server.stop()
      plugin.receive(event)
      expect { plugin.stats.total_output_requests.value }.to eventually(be > 1).within(10).pause_for(1)
      expect { plugin.stats.total_response("failure") }.to eventually(be > 1).within(25).pause_for(1)
      result = @@server.drain()
      @@server.start()
      @@server.response = TestServer::RESPONSE_200
      expect { plugin.stats.total_response("200") }.to eventually(be > 1).within(10).pause_for(1)
      result = @@server.drain()
      expect(result.size).to be > 0
    end

    it "should reuse tokens" do
      plugin.register
      30.times { plugin.receive(event) }
      expect { plugin.stats.total_response("200") }.to eventually(eq 30).within(100).pause_for(1)
    end

  end # context

  context "close()" do

    let(:plugin) { LogStash::Outputs::SumoLogic.new("url" => "http://localhost:#{TestServer::PORT}", "sender_max" => 10) }
    let(:event) { LogStash::Event.new("host" => "myHost", "message" => "Hello world") }

    it "should drain out messages" do
      plugin.register
      30.times { plugin.receive(event) }
      plugin.close
      expect(plugin.stats.total_response("200")).to eq 30
    end

  end # context

end # describe
data/spec/outputs/sumologic_spec.rb
@@ -0,0 +1,240 @@
# encoding: utf-8
require "logstash/devutils/rspec/spec_helper"
require "rspec/eventually"
require "logstash/outputs/sumologic"

describe LogStash::Outputs::SumoLogic, :unless => (ENV["sumo_url"].to_s.empty?) do

  before :each do
    plugin.register()
  end

  after :each do
    plugin.close()
  end

  context "default configuration" do
    let(:plugin) {
      LogStash::Outputs::SumoLogic.new("url" => ENV["sumo_url"])
    }

    it "cookies are disabled by default" do
      expect(plugin.cookies).to be false
    end
  end

  context "no pile" do
    context "single sender" do
      context "send log in json" do
        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 1,
            "format" => "%{@json}")
        }
        specify {
          event = LogStash::Event.new("host" => "myHost", "message" => "Hello world")
          plugin.receive(event)
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end
      context "send fields as metrics" do
        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 1,
            "fields_as_metrics" => true,
            "intrinsic_tags" => {
              "host" => "%{host}"
            },
            "meta_tags" => {
              "foo" => "%{foo}"
            })
        }

        specify {
          event = LogStash::Event.new(
            "host" => "myHost",
            "foo" => "fancy",
            "cpu" => [0.24, 0.11, 0.75, 0.28],
            "storageRW" => 51,
            "bar" => "blahblah",
            "blkio" => {
              "write_ps" => 5,
              "read_ps" => 2,
              "total_ps" => 0
            })

          plugin.receive(event)
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end
    end
    context "multiple senders" do
      context "send log in json" do
        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 5,
            "format" => "%{@json}")
        }

        specify {
          5.times { |t|
            event = LogStash::Event.new("host" => "myHost", "message" => "Hello world - #{t}")
            plugin.receive(event)
          }
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end

      context "send multiple logs in json" do
        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 5,
            "format" => "%{@json}"
          )
        }

        specify {
          5.times { |t|
            events = 10.times.map { |r|
              LogStash::Event.new("host" => "myHost", "message" => "Hello world - #{t} - #{r}")
            }
            plugin.multi_receive(events)
          }
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end
    end
  end
  context "has pile" do
    context "single sender" do
      context "send log in json" do
        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 1,
            "interval" => 3,
            "format" => "%{@json}")
        }

        specify {
          5.times { |t|
            event = LogStash::Event.new("host" => "myHost", "message" => "Hello world - #{t}")
            plugin.receive(event)
          }
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end

      context "send multiple logs in json" do
        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 1,
            "interval" => 3,
            "format" => "%{@json}"
          )
        }

        specify {
          5.times { |t|
            events = 10.times.map { |r|
              LogStash::Event.new("host" => "myHost", "message" => "Hello world - #{t} - #{r}")
            }
            plugin.multi_receive(events)
          }
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end
    end
    context "multiple senders" do
      context "send log in json" do

        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 5,
            "interval" => 3,
            "format" => "%{@json}")
        }

        specify {
          5.times { |t|
            event = LogStash::Event.new("host" => "myHost", "message" => "Hello world - #{t}")
            plugin.receive(event)
          }
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end
      context "send multiple logs in json" do
        let(:plugin) {
          LogStash::Outputs::SumoLogic.new(
            "url" => ENV["sumo_url"],
            "sender_max" => 5,
            "interval" => 3,
            "format" => "%{@json}"
          )
        }

        specify {
          5.times { |t|
            events = 10.times.map { |r|
              LogStash::Event.new("host" => "myHost", "message" => "Hello world - #{t} - #{r}")
            }
            plugin.multi_receive(events)
          }
          expect { plugin.stats.total_output_requests.value }.to eventually(be > 0).within(10).pause_for(1)
          expect { plugin.stats.total_response("200") }.to eventually(be > 0).within(10).pause_for(1)
        }
      end
    end
  end

  @@map = [('a'..'z'), ('A'..'Z')].map(&:to_a).flatten
  def get_line(length)
    length.times.map { @@map[rand(@@map.length)] }.join
  end

  context "throughput baseline" do
    let(:plugin) {
      LogStash::Outputs::SumoLogic.new(
        "url" => ENV["sumo_url"],
        "source_category" => "logstash_ci_baseline",
        "sender_max" => 100,
        "interval" => 30,
        "format" => "%{@timestamp} %{message}",
        "compress" => true,
        "compress_encoding" => "gzip",
        "stats_enabled" => true,
        "stats_interval" => 1
      )
    }

    log_length = 5000 + rand(1000)
    log_count = 50000 + rand(10000)

    specify {
      log_count.times { |t|
        event = LogStash::Event.new("message" => "#{t} - #{get_line(log_length)}")
        plugin.receive(event)
      }
      expect { plugin.stats.total_log_lines.value }.to eventually(be >= log_count).within(60).pause_for(1)
      bytes = plugin.stats.total_output_bytes.value
      spend = (Time.now - plugin.stats.initialize_time) * 1000
      rate = bytes / spend * 1000
      puts "Sent #{plugin.stats.total_log_lines.value} log lines with #{bytes} bytes in #{'%.2f' % spend} ms, rate #{'%.2f' % (rate/1024/1024) } MB/s."
      expect(rate).to be > 2_000_000
    }
  end

end # describe