logstash-output-scalyr 0.2.7.beta → 0.2.9.beta

metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-output-scalyr
  version: !ruby/object:Gem::Version
- version: 0.2.7.beta
+ version: 0.2.9.beta
  platform: ruby
  authors:
  - Edward Chee
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-08-04 00:00:00.000000000 Z
+ date: 2022-11-08 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement
@@ -165,16 +165,6 @@ files:
  - lib/scalyr/common/util.rb
  - lib/scalyr/constants.rb
  - logstash-output-scalyr.gemspec
- - spec/benchmarks/bignum_fixing.rb
- - spec/benchmarks/flattening_and_serialization.rb
- - spec/benchmarks/json_serialization.rb
- - spec/benchmarks/metrics_overhead.rb
- - spec/benchmarks/set_session_level_serverhost_on_events.rb
- - spec/benchmarks/util.rb
- - spec/logstash/outputs/fixtures/example_com.pem
- - spec/logstash/outputs/scalyr_integration_spec.rb
- - spec/logstash/outputs/scalyr_spec.rb
- - spec/scalyr/common/util_spec.rb
  homepage: https://www.scalyr.com/help/data-sources#logstash
  licenses:
  - Apache-2.0
@@ -201,14 +191,4 @@ rubygems_version: 2.7.10
  signing_key:
  specification_version: 4
  summary: Scalyr output plugin for Logstash
- test_files:
- - spec/benchmarks/bignum_fixing.rb
- - spec/benchmarks/flattening_and_serialization.rb
- - spec/benchmarks/json_serialization.rb
- - spec/benchmarks/metrics_overhead.rb
- - spec/benchmarks/set_session_level_serverhost_on_events.rb
- - spec/benchmarks/util.rb
- - spec/logstash/outputs/fixtures/example_com.pem
- - spec/logstash/outputs/scalyr_integration_spec.rb
- - spec/logstash/outputs/scalyr_spec.rb
- - spec/scalyr/common/util_spec.rb
+ test_files: []
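
Net effect of the metadata change: the version moves from 0.2.7.beta to 0.2.9.beta, the build date moves to 2022-11-08, and the spec/benchmark files are no longer packaged (they disappear from the files: list and test_files: becomes an empty array). A minimal sketch of how a gemspec commonly achieves this, assuming a files glob limited to runtime code; the real logstash-output-scalyr.gemspec may do it differently:

    # Hypothetical gemspec excerpt (illustration only, not the plugin's actual gemspec).
    # Restricting the packaged file list to runtime code keeps spec/ and benchmark
    # scripts out of the built gem, which is what the shorter files: list and the
    # empty test_files: above reflect.
    Gem::Specification.new do |s|
      s.name       = "logstash-output-scalyr"
      s.version    = "0.2.9.beta"
      s.files      = Dir["lib/**/*.rb"] + ["logstash-output-scalyr.gemspec"]
      s.test_files = []
    end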
spec/benchmarks/bignum_fixing.rb DELETED
@@ -1,87 +0,0 @@
- require 'benchmark'
- require 'quantile'
-
- require_relative '../../lib/scalyr/common/util'
- require_relative './util'
-
- # Micro benchmark which measures how long it takes to find all the Bignums in a record and convert them to strings
-
- ITERATIONS = 500
-
- def rand_bignum()
-   return 200004000020304050300 + rand(999999)
- end
-
- def generate_hash(widths)
-   result = {}
-   if widths.empty?
-     return rand_bignum()
-   else
-     widths[0].times do
-       result[rand_str(9)] = generate_hash(widths[1..widths.length])
-     end
-     return result
-   end
- end
-
- def generate_data_array_for_spec(spec)
-   data = []
-   ITERATIONS.times do
-     data << generate_hash(spec)
-   end
-
-   data
- end
-
- def run_benchmark_and_print_results(data, run_benchmark_func)
-   puts ""
-   puts "Using %s total keys in a hash" % [Scalyr::Common::Util.flatten(data[0]).count]
-   puts ""
-
-   result = []
-   ITERATIONS.times do |i|
-     result << Benchmark.measure { run_benchmark_func.(data[i]) }
-   end
-
-   sum = result.inject(nil) { |sum, t| sum.nil? ? sum = t : sum += t }
-   avg = sum / result.size
-
-   Benchmark.bm(7, "sum:", "avg:") do |b|
-     [sum, avg]
-   end
-   puts ""
- end
-
-
- puts "Using %s iterations" % [ITERATIONS]
- puts ""
-
- @value = Quantile::Estimator.new
- @prng = Random.new
-
- def convert_bignums(record)
-   Scalyr::Common::Util.convert_bignums(record)
- end
-
- puts "Util.convert_bignums()"
- puts "==============================="
-
- # Around ~200 keys in a hash
- data = generate_data_array_for_spec([4, 4, 3, 4])
- run_benchmark_and_print_results(data, method(:convert_bignums))
-
- # Around ~200 keys in a hash (single level)
- data = generate_data_array_for_spec([200])
- run_benchmark_and_print_results(data, method(:convert_bignums))
-
- # Around ~512 keys in a hash
- data = generate_data_array_for_spec([8, 4, 4, 4])
- run_benchmark_and_print_results(data, method(:convert_bignums))
-
- # Around ~960 keys in a hash
- data = generate_data_array_for_spec([12, 5, 4, 4])
- run_benchmark_and_print_results(data, method(:convert_bignums))
-
- # Around ~2700 keys in a hash
- data = generate_data_array_for_spec([14, 8, 6, 4])
- run_benchmark_and_print_results(data, method(:convert_bignums))
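
The deleted benchmark above times Scalyr::Common::Util.convert_bignums over nested hashes of varying shape. For orientation, a minimal sketch of the kind of recursive conversion being measured, assuming the goal is to stringify arbitrary-precision integers before JSON serialization; this is an illustration, not the plugin's implementation:

    # Walk a nested structure and stringify integers that exceed a 64-bit range.
    MAX_INT = 2**63 - 1

    def convert_bignums_sketch(value)
      case value
      when Hash
        value.each { |k, v| value[k] = convert_bignums_sketch(v) }
      when Array
        value.map! { |v| convert_bignums_sketch(v) }
      when Integer
        value.abs > MAX_INT ? value.to_s : value
      else
        value
      end
    end

    record = { "count" => 200004000020304050300, "nested" => { "ok" => 42 } }
    p convert_bignums_sketch(record)
    # => {"count"=>"200004000020304050300", "nested"=>{"ok"=>42}}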
spec/benchmarks/flattening_and_serialization.rb DELETED
@@ -1,100 +0,0 @@
- require 'benchmark'
- require 'json'
-
- require_relative '../../lib/scalyr/common/util'
- require_relative './util'
-
- # NOTE: When using jRuby using multiple iterations with the same dataset doesn't make
- # sense since it will just use JITed version of the code which will be very fast. If we
- # wanted to accurately measure using multiple iterations we would need te different
- # input data for each iteration.
- ITERATIONS = 500
-
- def run_benchmark_and_print_results(data, run_benchmark_func)
-   puts ""
-   puts "Using %s total keys in a hash" % [Scalyr::Common::Util.flatten(data[0]).count]
-   puts ""
-
-   result = []
-   ITERATIONS.times do |i|
-     result << Benchmark.measure { run_benchmark_func.(data[i]) }
-   end
-
-   sum = result.inject(nil) { |sum, t| sum.nil? ? sum = t : sum += t }
-   avg = sum / result.size
-
-   Benchmark.bm(7, "sum:", "avg:") do |b|
-     [sum, avg]
-   end
-   puts ""
- end
-
- def flatten_data_func(data)
-   Scalyr::Common::Util.flatten(data)
- end
-
- def json_serialize_data(data)
-   data.to_json
- end
-
- DATASETS = {
-   :keys_50 => generate_data_array_for_spec([3, 3, 3, 2]),
-   :keys_200 => generate_data_array_for_spec([4, 4, 3, 4]),
-   :keys_200_flat => generate_data_array_for_spec([200]),
-   :keys_512 => generate_data_array_for_spec([8, 4, 4, 4]),
-   :keys_960 => generate_data_array_for_spec([12, 5, 4, 4]),
-   :keys_2700 => generate_data_array_for_spec([14, 8, 6, 4])
- }
-
- puts "Using %s iterations" % [ITERATIONS]
- puts ""
-
- puts "Scalyr::Common::Util.flatten()"
- puts "==============================="
-
- # Around ~50 keys in a hash
- data = DATASETS[:keys_50]
- run_benchmark_and_print_results(data, method(:flatten_data_func))
-
- # Around ~200 keys in a hash
- data = DATASETS[:keys_200]
- run_benchmark_and_print_results(data, method(:flatten_data_func))
-
- # Around ~200 keys in a hash (single level)
- data = DATASETS[:keys_200_flat]
- run_benchmark_and_print_results(data, method(:flatten_data_func))
-
- # Around ~512 keys in a hash
- data = DATASETS[:keys_512]
- run_benchmark_and_print_results(data, method(:flatten_data_func))
-
- # Around ~960 keys in a hash
- data = DATASETS[:keys_960]
- run_benchmark_and_print_results(data, method(:flatten_data_func))
-
- # Around ~2700 keys in a hash
- data = DATASETS[:keys_2700]
- run_benchmark_and_print_results(data, method(:flatten_data_func))
-
- puts "JSON.dumps (hash.to_dict)"
- puts "==============================="
-
- # Around ~200 keys in a hash
- data = generate_data_array_for_spec([4, 4, 3, 4])
- run_benchmark_and_print_results(data, method(:json_serialize_data))
-
- # Around ~200 keys in a hash (single level)
- data = DATASETS[:keys_200_flat]
- run_benchmark_and_print_results(data, method(:json_serialize_data))
-
- # Around ~512 keys in a hash
- data = generate_data_array_for_spec([8, 4, 4, 4])
- run_benchmark_and_print_results(data, method(:json_serialize_data))
-
- # Around ~960 keys in a hash
- data = generate_data_array_for_spec([12, 5, 4, 4])
- run_benchmark_and_print_results(data, method(:json_serialize_data))
-
- # Around ~2700 keys in a hash
- data = generate_data_array_for_spec([14, 8, 6, 4])
- run_benchmark_and_print_results(data, method(:json_serialize_data))
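
This script benchmarks Scalyr::Common::Util.flatten followed by plain Hash#to_json. A rough sketch of what a flattener of this kind does, collapsing nested keys into a single level; the delimiter and exact semantics here are assumptions for illustration, not the plugin's behavior:

    # Flatten nested hashes into dotted keys, e.g. {"a"=>{"b"=>1}} => {"a.b"=>1}.
    def flatten_sketch(hash, prefix = nil, result = {})
      hash.each do |key, value|
        full_key = prefix ? "#{prefix}.#{key}" : key.to_s
        if value.is_a?(Hash)
          flatten_sketch(value, full_key, result)
        else
          result[full_key] = value
        end
      end
      result
    end

    p flatten_sketch({ "a" => { "b" => 1, "c" => { "d" => 2 } }, "e" => 3 })
    # => {"a.b"=>1, "a.c.d"=>2, "e"=>3}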
spec/benchmarks/json_serialization.rb DELETED
@@ -1,85 +0,0 @@
- require 'benchmark'
- require 'json'
- require 'jrjackson'
-
- require_relative '../../lib/scalyr/common/util'
- require_relative './util'
-
- ITERATIONS = 500
-
- def json_serialize_data_native(data)
-   data.to_json
- end
-
- def json_serialize_data_jrjackson(data)
-   JrJackson::Json.dump(data)
- end
-
- DATASETS = {
-   :keys_50 => generate_data_array_for_spec([3, 3, 3, 2]),
-   :keys_200 => generate_data_array_for_spec([4, 4, 3, 4]),
-   :keys_200_flat => generate_data_array_for_spec([200]),
-   :keys_512 => generate_data_array_for_spec([8, 4, 4, 4]),
-   :keys_960 => generate_data_array_for_spec([12, 5, 4, 4]),
-   :keys_2700 => generate_data_array_for_spec([14, 8, 6, 4])
- }
-
- def run_benchmark_and_print_results(data, run_benchmark_func)
-   puts ""
-   puts "Using %s total keys in a hash" % [Scalyr::Common::Util.flatten(data[0]).count]
-   puts ""
-
-   result = []
-   ITERATIONS.times do |i|
-     result << Benchmark.measure { run_benchmark_func.(data[i]) }
-   end
-
-   sum = result.inject(nil) { |sum, t| sum.nil? ? sum = t : sum += t }
-   avg = sum / result.size
-
-   Benchmark.bm(7, "sum:", "avg:") do |b|
-     [sum, avg]
-   end
-   puts ""
- end
-
- puts "Using %s iterations" % [ITERATIONS]
- puts ""
-
- puts "native"
- puts "==============================="
-
- # Around ~50 keys in a hash
- data = DATASETS[:keys_50]
- run_benchmark_and_print_results(data, method(:json_serialize_data_native))
-
- # Around ~200 keys in a hash
- data = DATASETS[:keys_200]
- run_benchmark_and_print_results(data, method(:json_serialize_data_native))
-
- # Around ~200 keys in a hash (single level)
- data = DATASETS[:keys_200_flat]
- run_benchmark_and_print_results(data, method(:json_serialize_data_native))
-
- # Around ~2700 keys in a hash
- data = DATASETS[:keys_2700]
- run_benchmark_and_print_results(data, method(:json_serialize_data_native))
-
- puts "jrjackson"
- puts "==============================="
-
- # Around ~50 keys in a hash
- data = DATASETS[:keys_50]
- run_benchmark_and_print_results(data, method(:json_serialize_data_jrjackson))
-
- # Around ~200 keys in a hash
- data = DATASETS[:keys_200]
- run_benchmark_and_print_results(data, method(:json_serialize_data_jrjackson))
-
- # Around ~200 keys in a hash (single level)
- data = DATASETS[:keys_200_flat]
- run_benchmark_and_print_results(data, method(:json_serialize_data_jrjackson))
-
- # Around ~2700 keys in a hash
- data = DATASETS[:keys_2700]
- run_benchmark_and_print_results(data, method(:json_serialize_data_jrjackson))
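
This script compares JRuby's built-in Hash#to_json with JrJackson::Json.dump on identical payloads. A small sanity check one could run alongside it, assuming a JRuby runtime (JrJackson is JRuby-only); the payload contents are made up for illustration:

    require 'json'
    require 'jrjackson'

    payload = { "host" => "web-1", "metrics" => { "latency_ms" => 12, "ok" => true } }

    native_json    = payload.to_json
    jrjackson_json = JrJackson::Json.dump(payload)

    # Compare parsed structures rather than raw strings, since key order or
    # whitespace may legitimately differ between serializers.
    raise "serializers disagree" unless JSON.parse(native_json) == JSON.parse(jrjackson_json)
    puts "native to_json and JrJackson produce equivalent JSON"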
spec/benchmarks/metrics_overhead.rb DELETED
@@ -1,48 +0,0 @@
- require 'benchmark'
- require 'quantile'
-
- require_relative '../../lib/scalyr/common/util'
-
- # Micro benchmark which measures how much overhead Quantile.observe adds vs random sampling to see
- # where making sampling (e.g. on event level metrics) is desired
-
- ITERATIONS = 10000
-
- def run_benchmark_and_print_results(run_benchmark_func)
-   result = []
-   ITERATIONS.times do |i|
-     result << Benchmark.measure { run_benchmark_func.() }
-   end
-
-   sum = result.inject(nil) { |sum, t| sum.nil? ? sum = t : sum += t }
-   avg = sum / result.size
-
-   Benchmark.bm(7, "sum:", "avg:") do |b|
-     [sum, avg]
-   end
-   puts ""
- end
-
-
- puts "Using %s iterations" % [ITERATIONS]
- puts ""
-
- @value = Quantile::Estimator.new
- @prng = Random.new
-
- def quantile_observe()
-   @value.observe(5)
- end
-
- def random_sample()
-   return @prng.rand(0.0..1.0) < 0.5
- end
-
- puts "Quartile.observe()"
- puts "==============================="
-
- run_benchmark_and_print_results(method(:quantile_observe))
-
- puts "random sample"
- puts "==============================="
- run_benchmark_and_print_results(method(:random_sample))
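
The script above weighs the per-call cost of Quantile::Estimator#observe against a bare random draw, to judge whether event-level metrics should be sampled rather than recorded on every event. A short sketch of that sampling pattern, with an arbitrary 10% rate chosen purely for illustration:

    require 'quantile'

    SAMPLE_RATE = 0.1
    estimator = Quantile::Estimator.new
    prng = Random.new

    10_000.times do
      duration_ms = rand(1.0..20.0)  # stand-in for a measured per-event duration
      # Only a fraction of events pay the observe() cost.
      estimator.observe(duration_ms) if prng.rand < SAMPLE_RATE
    end

    puts "sampled p50: %.2f ms" % estimator.query(0.5)
    puts "sampled p99: %.2f ms" % estimator.query(0.99)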
spec/benchmarks/set_session_level_serverhost_on_events.rb DELETED
@@ -1,107 +0,0 @@
- require 'benchmark'
- require 'quantile'
-
- require_relative '../../lib/scalyr/constants'
- require_relative '../../lib/scalyr/common/util'
- require_relative './util'
-
- # Micro benchmark which measures how long "set_session_level_serverhost_on_events" takes
-
- ITERATIONS = 100
-
- def run_benchmark_and_print_results(data, run_benchmark_func)
-   puts ""
-   puts "Using %s total events in a batch" % [data[0].size]
-   puts ""
-
-   result = []
-   ITERATIONS.times do |i|
-     result << Benchmark.measure { run_benchmark_func.(data[i]) }
-   end
-
-   sum = result.inject(nil) { |sum, t| sum.nil? ? sum = t : sum += t }
-   avg = sum / result.size
-
-   Benchmark.bm(7, "sum:", "avg:") do |b|
-     [sum, avg]
-   end
-   puts ""
- end
-
- # Generate random event with only single event having special server host attribute set which
- # represents a worst case scenario since we need to backfill rest of the events.
- def generate_events(count)
-   result = []
-
-   ITERATIONS.times do |iteration|
-     events = []
-
-     count.times do |index|
-       event = generate_hash([2])
-       event[:attrs] = Hash.new
-       event[:log] = 1
-
-       if index == count - 1
-         event[:attrs][EVENT_LEVEL_SERVER_HOST_ATTRIBUTE_NAME] = format("test-host-%s", index)
-       end
-
-       events << event
-     end
-
-     raise "Assertion failed" unless events.size == count
-
-     result << events
-   end
-
-   raise "Assertion failed" unless result.size == ITERATIONS
-   result
- end
-
- def run_func(events)
-   # NOTE: This function manipulates events in place
-   events.each_with_index do |event, index|
-     if index < events.size - 1
-       # Last event will have _origServerHost set, but others won't
-       raise "Assertion failed" unless event[:attrs][EVENT_LEVEL_SERVER_HOST_ATTRIBUTE_NAME].nil?
-     end
-   end
-
-   Scalyr::Common::Util.set_session_level_serverhost_on_events("session-server-host-dummy", events, {}, true)
-
-   events.each do |event|
-     raise "Assertion failed" unless event[:attrs][EVENT_LEVEL_SERVER_HOST_ATTRIBUTE_NAME].nil? == false
-   end
- end
-
-
- puts "Using %s iterations" % [ITERATIONS]
- puts ""
-
- @value = Quantile::Estimator.new
-
- puts "Util.set_session_level_serverhost_on_events()"
- puts "==============================="
-
- # 100 events in a batch
- data = generate_events(100)
- run_benchmark_and_print_results(data, method(:run_func))
-
- # 500 events in a batch
- data = generate_events(500)
- run_benchmark_and_print_results(data, method(:run_func))
-
- # 1000 events in a batch
- data = generate_events(1000)
- run_benchmark_and_print_results(data, method(:run_func))
-
- # 2000 events in a batch
- data = generate_events(2000)
- run_benchmark_and_print_results(data, method(:run_func))
-
- # 3000 events in a batch
- data = generate_events(3000)
- run_benchmark_and_print_results(data, method(:run_func))
-
- # 5000 events in a batch
- data = generate_events(5000)
- run_benchmark_and_print_results(data, method(:run_func))
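
The benchmark above drives Scalyr::Common::Util.set_session_level_serverhost_on_events, whose job (per the assertions in run_func) is to backfill a session-level server host onto every event that does not already carry an event-level one. A stripped-down sketch of that backfill, where the attribute name and the in-place update are assumptions for illustration rather than the plugin's actual logic:

    # Hypothetical stand-in for EVENT_LEVEL_SERVER_HOST_ATTRIBUTE_NAME; the real
    # constant is defined in lib/scalyr/constants.rb (required above).
    SERVER_HOST_ATTR = "_origServerHost"

    def backfill_serverhost_sketch(session_server_host, events)
      events.each do |event|
        attrs = (event[:attrs] ||= {})
        # Keep an event-level host if present, otherwise fall back to the session value.
        attrs[SERVER_HOST_ATTR] ||= session_server_host
      end
    end

    events = [
      { attrs: { SERVER_HOST_ATTR => "worker-7" }, log: 1 },
      { attrs: {}, log: 1 }
    ]
    backfill_serverhost_sketch("session-host", events)
    p events.map { |e| e[:attrs][SERVER_HOST_ATTR] }
    # => ["worker-7", "session-host"]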
spec/benchmarks/util.rb DELETED
@@ -1,24 +0,0 @@
- def rand_str(len)
-   return (0...len).map { (65 + rand(26)).chr }.join
- end
-
- def generate_hash(widths)
-   result = {}
-   if widths.empty?
-     return rand_str(20)
-   else
-     widths[0].times do
-       result[rand_str(9)] = generate_hash(widths[1..widths.length])
-     end
-     return result
-   end
- end
-
- def generate_data_array_for_spec(spec)
-   data = []
-   ITERATIONS.times do
-     data << generate_hash(spec)
-   end
-
-   data
- end
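
A note on the width specs these helpers take: each entry in widths is the branching factor at one nesting level, so the number of leaf keys in the generated hash is the product of the entries. That is where the "Around ~200 keys" style comments in the other scripts come from:

    # Leaf-key counts implied by the width specs used across these benchmarks.
    [[3, 3, 3, 2], [4, 4, 3, 4], [8, 4, 4, 4], [12, 5, 4, 4], [14, 8, 6, 4]].each do |spec|
      puts "#{spec.inspect} -> #{spec.inject(:*)} leaf keys"
    end
    # [3, 3, 3, 2]  -> 54 leaf keys    (~50)
    # [4, 4, 3, 4]  -> 192 leaf keys   (~200)
    # [8, 4, 4, 4]  -> 512 leaf keys
    # [12, 5, 4, 4] -> 960 leaf keys
    # [14, 8, 6, 4] -> 2688 leaf keys  (~2700)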
spec/logstash/outputs/fixtures/example_com.pem DELETED
@@ -1,41 +0,0 @@
- -----BEGIN CERTIFICATE-----
- MIIHQDCCBiigAwIBAgIQD9B43Ujxor1NDyupa2A4/jANBgkqhkiG9w0BAQsFADBN
- MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMScwJQYDVQQDEx5E
- aWdpQ2VydCBTSEEyIFNlY3VyZSBTZXJ2ZXIgQ0EwHhcNMTgxMTI4MDAwMDAwWhcN
- MjAxMjAyMTIwMDAwWjCBpTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3Ju
- aWExFDASBgNVBAcTC0xvcyBBbmdlbGVzMTwwOgYDVQQKEzNJbnRlcm5ldCBDb3Jw
- b3JhdGlvbiBmb3IgQXNzaWduZWQgTmFtZXMgYW5kIE51bWJlcnMxEzARBgNVBAsT
- ClRlY2hub2xvZ3kxGDAWBgNVBAMTD3d3dy5leGFtcGxlLm9yZzCCASIwDQYJKoZI
- hvcNAQEBBQADggEPADCCAQoCggEBANDwEnSgliByCGUZElpdStA6jGaPoCkrp9vV
- rAzPpXGSFUIVsAeSdjF11yeOTVBqddF7U14nqu3rpGA68o5FGGtFM1yFEaogEv5g
- rJ1MRY/d0w4+dw8JwoVlNMci+3QTuUKf9yH28JxEdG3J37Mfj2C3cREGkGNBnY80
- eyRJRqzy8I0LSPTTkhr3okXuzOXXg38ugr1x3SgZWDNuEaE6oGpyYJIBWZ9jF3pJ
- QnucP9vTBejMh374qvyd0QVQq3WxHrogy4nUbWw3gihMxT98wRD1oKVma1NTydvt
- hcNtBfhkp8kO64/hxLHrLWgOFT/l4tz8IWQt7mkrBHjbd2XLVPkCAwEAAaOCA8Ew
- ggO9MB8GA1UdIwQYMBaAFA+AYRyCMWHVLyjnjUY4tCzhxtniMB0GA1UdDgQWBBRm
- mGIC4AmRp9njNvt2xrC/oW2nvjCBgQYDVR0RBHoweIIPd3d3LmV4YW1wbGUub3Jn
- ggtleGFtcGxlLmNvbYILZXhhbXBsZS5lZHWCC2V4YW1wbGUubmV0ggtleGFtcGxl
- Lm9yZ4IPd3d3LmV4YW1wbGUuY29tgg93d3cuZXhhbXBsZS5lZHWCD3d3dy5leGFt
- cGxlLm5ldDAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG
- AQUFBwMCMGsGA1UdHwRkMGIwL6AtoCuGKWh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNv
- bS9zc2NhLXNoYTItZzYuY3JsMC+gLaArhilodHRwOi8vY3JsNC5kaWdpY2VydC5j
- b20vc3NjYS1zaGEyLWc2LmNybDBMBgNVHSAERTBDMDcGCWCGSAGG/WwBATAqMCgG
- CCsGAQUFBwIBFhxodHRwczovL3d3dy5kaWdpY2VydC5jb20vQ1BTMAgGBmeBDAEC
- AjB8BggrBgEFBQcBAQRwMG4wJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2lj
- ZXJ0LmNvbTBGBggrBgEFBQcwAoY6aHR0cDovL2NhY2VydHMuZGlnaWNlcnQuY29t
- L0RpZ2lDZXJ0U0hBMlNlY3VyZVNlcnZlckNBLmNydDAMBgNVHRMBAf8EAjAAMIIB
- fwYKKwYBBAHWeQIEAgSCAW8EggFrAWkAdwCkuQmQtBhYFIe7E6LMZ3AKPDWYBPkb
- 37jjd80OyA3cEAAAAWdcMZVGAAAEAwBIMEYCIQCEZIG3IR36Gkj1dq5L6EaGVycX
- sHvpO7dKV0JsooTEbAIhALuTtf4wxGTkFkx8blhTV+7sf6pFT78ORo7+cP39jkJC
- AHYAh3W/51l8+IxDmV+9827/Vo1HVjb/SrVgwbTq/16ggw8AAAFnXDGWFQAABAMA
- RzBFAiBvqnfSHKeUwGMtLrOG3UGLQIoaL3+uZsGTX3MfSJNQEQIhANL5nUiGBR6g
- l0QlCzzqzvorGXyB/yd7nttYttzo8EpOAHYAb1N2rDHwMRnYmQCkURX/dxUcEdkC
- wQApBo2yCJo32RMAAAFnXDGWnAAABAMARzBFAiEA5Hn7Q4SOyqHkT+kDsHq7ku7z
- RDuM7P4UDX2ft2Mpny0CIE13WtxJAUr0aASFYZ/XjSAMMfrB0/RxClvWVss9LHKM
- MA0GCSqGSIb3DQEBCwUAA4IBAQBzcIXvQEGnakPVeJx7VUjmvGuZhrr7DQOLeP4R
- 8CmgDM1pFAvGBHiyzvCH1QGdxFl6cf7wbp7BoLCRLR/qPVXFMwUMzcE1GLBqaGZM
- v1Yh2lvZSLmMNSGRXdx113pGLCInpm/TOhfrvr0TxRImc8BdozWJavsn1N2qdHQu
- N+UBO6bQMLCD0KHEdSGFsuX6ZwAworxTg02/1qiDu7zW7RyzHvFYA4IAjpzvkPIa
- X6KjBtpdvp/aXabmL95YgBjT8WJ7pqOfrqhpcmOBZa6Cg6O1l4qbIFH/Gj9hQB5I
- 0Gs4+eH6F9h3SojmPTYkT+8KuZ9w84Mn+M8qBXUQoYoKgIjN
- -----END CERTIFICATE-----