scout_apm_logging 0.0.13 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +0 -4
- data/.rubocop.yml +1 -1
- data/CHANGELOG.md +6 -0
- data/NOTICE +4 -0
- data/lib/scout_apm/logging/config.rb +5 -131
- data/lib/scout_apm/logging/loggers/capture.rb +3 -2
- data/lib/scout_apm/logging/loggers/formatter.rb +21 -2
- data/lib/scout_apm/logging/loggers/opentelemetry/LICENSE +201 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/NOTICE +9 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/api/logs/log_record.rb +18 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/api/logs/logger.rb +64 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/api/logs/logger_provider.rb +31 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/api/logs/severity_number.rb +43 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/api/logs/version.rb +18 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/api/logs.rb +28 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/exporter/exporter/otlp/logs_exporter.rb +389 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/exporter/exporter/otlp/version.rb +20 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/exporter/proto/collector/logs/v1/logs_service_pb.rb +43 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/exporter/proto/common/v1/common_pb.rb +58 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/exporter/proto/logs/v1/logs_pb.rb +91 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/exporter/proto/resource/v1/resource_pb.rb +33 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/opentelemetry.rb +62 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/export/batch_log_record_processor.rb +225 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/export/log_record_exporter.rb +64 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/export.rb +34 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/log_record.rb +115 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/log_record_data.rb +31 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/log_record_processor.rb +53 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/logger.rb +94 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/logger_provider.rb +158 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs/version.rb +20 -0
- data/lib/scout_apm/logging/loggers/opentelemetry/sdk/logs.rb +28 -0
- data/lib/scout_apm/logging/utils.rb +0 -69
- data/lib/scout_apm/logging/version.rb +1 -1
- data/lib/scout_apm_logging.rb +2 -11
- data/scout_apm_logging.gemspec +7 -0
- data/spec/data/config_test_1.yml +0 -1
- data/spec/data/mock_config.yml +0 -3
- data/spec/integration/rails/lifecycle_spec.rb +57 -23
- data/spec/spec_helper.rb +0 -12
- data/spec/unit/config_spec.rb +0 -12
- data/spec/unit/loggers/capture_spec.rb +0 -6
- metadata +126 -39
- data/bin/scout_apm_logging_monitor +0 -6
- data/lib/scout_apm/logging/monitor/_rails.rb +0 -22
- data/lib/scout_apm/logging/monitor/collector/checksum.rb +0 -51
- data/lib/scout_apm/logging/monitor/collector/configuration.rb +0 -150
- data/lib/scout_apm/logging/monitor/collector/downloader.rb +0 -78
- data/lib/scout_apm/logging/monitor/collector/extractor.rb +0 -37
- data/lib/scout_apm/logging/monitor/collector/manager.rb +0 -57
- data/lib/scout_apm/logging/monitor/monitor.rb +0 -216
- data/lib/scout_apm/logging/monitor_manager/manager.rb +0 -162
- data/lib/scout_apm/logging/state.rb +0 -69
- data/spec/data/empty_logs_config.yml +0 -0
- data/spec/data/logs_config.yml +0 -3
- data/spec/data/state_file.json +0 -3
- data/spec/integration/loggers/capture_spec.rb +0 -68
- data/spec/integration/monitor/collector/downloader/will_verify_checksum.rb +0 -49
- data/spec/integration/monitor/collector_healthcheck_spec.rb +0 -29
- data/spec/integration/monitor/continuous_state_collector_spec.rb +0 -31
- data/spec/integration/monitor/previous_collector_setup_spec.rb +0 -45
- data/spec/integration/monitor_manager/disable_agent_spec.rb +0 -30
- data/spec/integration/monitor_manager/monitor_pid_file_spec.rb +0 -38
- data/spec/integration/monitor_manager/single_monitor_spec.rb +0 -53
- data/spec/unit/monitor/collector/configuration_spec.rb +0 -64
- data/spec/unit/state_spec.rb +0 -20
- data/tooling/checksums.rb +0 -106
data/lib/scout_apm/logging/monitor/monitor.rb
DELETED
@@ -1,216 +0,0 @@
-# frozen_string_literal: true
-
-##
-# Launched as a daemon process by the monitor manager at Rails startup.
-##
-require 'json'
-
-require 'scout_apm'
-
-require_relative '../logger'
-require_relative '../context'
-require_relative '../config'
-require_relative '../utils'
-require_relative '../state'
-require_relative './collector/manager'
-
-module ScoutApm
-  module Logging
-    # Entry point for the monitor daemon process.
-    class Monitor
-      attr_reader :context
-      attr_accessor :latest_state_sha
-
-      @@instance = nil
-
-      def self.instance
-        @@instance ||= new
-      end
-
-      def initialize
-        @context = Context.new
-        context.logger.debug('Monitor instance created')
-
-        context.application_root = $stdin.gets&.chomp
-
-        # Load in the dynamic and state based config settings.
-        context.config = Config.with_file(context, determine_scout_config_filepath)
-
-        daemonize_process!
-      end
-
-      def setup!
-        context.config.logger.info('Monitor daemon process started')
-
-        add_exit_handler!
-
-        unless has_logs_to_monitor?
-          context.config.logger.warn('No logs are set to be monitored. Please set the `logs_monitored` config setting. Exiting.')
-          return
-        end
-
-        initiate_collector_setup! unless has_previous_collector_setup?
-
-        @latest_state_sha = get_state_file_sha
-
-        run!
-      end
-
-      def run!
-        # Prevent the monitor from checking the collector health before it's fully started.
-        # Having this be configurable is useful for testing.
-        sleep context.config.value('monitor_interval_delay')
-
-        loop do
-          sleep context.config.value('monitor_interval')
-
-          check_collector_health
-
-          check_state_change
-        end
-      end
-
-      # Only useful for testing.
-      def config=(config)
-        context.config = config
-      end
-
-      private
-
-      def daemonize_process!
-        # Similar to that of Process.daemon, but we want to keep the dir, STDOUT and STDERR.
-        exit if fork
-        Process.setsid
-        exit if fork
-        $stdin.reopen '/dev/null'
-
-        context.logger.debug("Monitor process daemonized, PID: #{Process.pid}")
-        File.write(context.config.value('monitor_pid_file'), Process.pid)
-      end
-
-      def has_logs_to_monitor?
-        context.config.value('logs_monitored').any?
-      end
-
-      def has_previous_collector_setup?
-        return false unless context.config.value('health_check_port') != 0
-
-        healthy_response = request_health_check_port("http://localhost:#{context.config.value('health_check_port')}/")
-
-        if healthy_response
-          context.logger.info("Collector already setup on port #{context.config.value('health_check_port')}")
-        else
-          context.logger.info('Setting up new collector')
-        end
-
-        healthy_response
-      end
-
-      def initiate_collector_setup!
-        set_health_check_port!
-
-        Collector::Manager.new(context).setup!
-      end
-
-      def is_port_available?(port)
-        socket = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
-        remote_address = Socket.sockaddr_in(port, '127.0.0.1')
-
-        begin
-          socket.connect_nonblock(remote_address)
-        rescue Errno::EINPROGRESS
-          IO.select(nil, [socket])
-          retry
-        rescue Errno::EISCONN, Errno::ECONNRESET
-          false
-        rescue Errno::ECONNREFUSED, Errno::EHOSTUNREACH
-          true
-        ensure
-          socket.close if socket && !socket.closed?
-        end
-      end
-
-      def set_health_check_port!
-        health_check_port = 13_133
-        until is_port_available?(health_check_port)
-          sleep 0.1
-          health_check_port += 1
-        end
-
-        Config::ConfigDynamic.set_value('health_check_port', health_check_port)
-        context.config.state.flush_state!
-      end
-
-      def request_health_check_port(endpoint)
-        uri = URI(endpoint)
-
-        begin
-          response = Net::HTTP.get_response(uri)
-
-          unless response.is_a?(Net::HTTPSuccess)
-            context.logger.error("Error occurred while checking collector health: #{response.message}")
-            return false
-          end
-        rescue StandardError => e
-          context.logger.error("Error occurred while checking collector health: #{e.message}")
-          return false
-        end
-
-        true
-      end
-
-      def check_collector_health
-        context.logger.debug('Checking collector health')
-        collector_health_endpoint = "http://localhost:#{context.config.value('health_check_port')}/"
-
-        healthy_response = request_health_check_port(collector_health_endpoint)
-
-        initiate_collector_setup! unless healthy_response
-      end
-
-      def remove_collector_process # rubocop:disable Metrics/AbcSize
-        return unless File.exist? context.config.value('collector_pid_file')
-
-        process_id = File.read(context.config.value('collector_pid_file'))
-        return if process_id.empty?
-
-        begin
-          Process.kill('TERM', process_id.to_i)
-        rescue Errno::ENOENT, Errno::ESRCH => e
-          context.logger.error("Error occurred while removing collector process from monitor: #{e.message}")
-        ensure
-          File.delete(context.config.value('collector_pid_file'))
-        end
-      end
-
-      def check_state_change
-        current_sha = get_state_file_sha
-
-        return if current_sha == latest_state_sha
-
-        remove_collector_process
-        initiate_collector_setup!
-
-        # File SHA can change due to port mappings on collector setup.
-        @latest_state_sha = get_state_file_sha
-      end
-
-      def add_exit_handler!
-        at_exit do
-          # There may not be a file to delete, as the monitor manager ensures cleaning it up when monitoring is disabled.
-          File.delete(context.config.value('monitor_pid_file')) if File.exist?(context.config.value('monitor_pid_file'))
-        end
-      end
-
-      def get_state_file_sha
-        return nil unless File.exist?(context.config.value('monitor_state_file'))
-
-        `sha256sum #{context.config.value('monitor_state_file')}`.split(' ').first
-      end
-
-      def determine_scout_config_filepath
-        "#{context.application_root}/config/scout_apm.yml"
-      end
-    end
-  end
-end
data/lib/scout_apm/logging/monitor_manager/manager.rb
DELETED
@@ -1,162 +0,0 @@
-# frozen_string_literal: true
-
-module ScoutApm
-  module Logging
-    # Manages the creation of the daemon monitor process.
-    class MonitorManager
-      attr_reader :context
-
-      @@instance = nil
-
-      def self.instance
-        @@instance ||= new
-      end
-
-      def initialize
-        @context = Context.new
-        context.config = Config.with_file(context, context.config.value('config_file'))
-      end
-
-      def setup!
-        context.config.log_settings(context.logger)
-        context.logger.info('Setting up monitor daemon process')
-
-        add_exit_handler!
-
-        determine_configuration_state
-      end
-
-      def determine_configuration_state
-        monitoring_enabled = context.config.value('logs_monitor')
-
-        if monitoring_enabled
-          context.logger.info('Log monitoring enabled')
-          create_process
-
-          # Continue to hold the lock until we have written the PID file.
-          ensure_monitor_pid_file_exists
-        else
-          context.logger.info('Log monitoring disabled')
-          remove_processes
-        end
-      end
-
-      # With the use of fileoffsets in the collector, and the persistent queue of already collected logs,
-      # we can safely restart the collector. Due to the way fingerprinting of the files works, if the
-      # file path switches, but the beginning contents of the file remain the same, the file will be
-      # treated as the same file as before.
-      # If logs get rotated, the fingerprint changes, and the collector automatically detects this.
-      def add_exit_handler!
-        # With the use of unicorn and puma worker killer, we want to ensure we only restart (exit and
-        # eventually start) the monitor and collector when the main process exits, and not the workers.
-        initialized_process_id = Process.pid
-        at_exit do
-          # Only remove/restart the monitor and collector if we are exiting from an app_server process.
-          # We need to wait on this check, as the process command line changes at some point.
-          if Utils.current_process_is_app_server? && Process.pid == initialized_process_id
-            context.logger.debug('Exiting from app server process. Removing monitor and collector processes.')
-            remove_processes
-          end
-        end
-      end
-
-      def create_process
-        return if process_exists?
-
-        Utils.ensure_directory_exists(context.config.value('monitor_pid_file'))
-
-        reader, writer = IO.pipe
-
-        gem_directory = File.expand_path('../../../..', __dir__)
-
-        # As we daemonize the process, we will write to the pid file within the process.
-        pid = Process.spawn("ruby #{gem_directory}/bin/scout_apm_logging_monitor", in: reader)
-
-        reader.close
-        # TODO: Add support for Sinatra.
-        writer.puts Rails.root if defined?(Rails)
-        writer.close
-        # Block until we have spawned the process and forked. This is to ensure
-        # we keep the exclusive lock until the process has written the PID file.
-        Process.wait(pid)
-      end
-
-      private
-
-      def ensure_monitor_pid_file_exists
-        start_time = Time.now
-        # We don't want to hold up the initial Rails boot time for very long.
-        timeout_seconds = 0.1
-
-        # Naive benchmarks show this taking ~0.01 seconds.
-        loop do
-          if File.exist?(context.config.value('monitor_pid_file'))
-            context.logger.debug('Monitor PID file exists. Releasing lock.')
-            break
-          end
-
-          if Time.now - start_time > timeout_seconds
-            context.logger.warn('Unable to verify monitor PID file write. Releasing lock.')
-            break
-          end
-
-          sleep 0.01
-        end
-      end
-
-      def process_exists?
-        return false unless File.exist? context.config.value('monitor_pid_file')
-
-        process_id = File.read(context.config.value('monitor_pid_file'))
-        return false if process_id.empty?
-
-        process_exists = Utils.check_process_liveliness(process_id.to_i, 'scout_apm_logging_monitor')
-        File.delete(context.config.value('monitor_pid_file')) unless process_exists
-
-        process_exists
-      end
-
-      def remove_monitor_process # rubocop:disable Metrics/AbcSize
-        return unless File.exist? context.config.value('monitor_pid_file')
-
-        process_id = File.read(context.config.value('monitor_pid_file'))
-        return if process_id.empty?
-
-        begin
-          Process.kill('TERM', process_id.to_i)
-        rescue Errno::ENOENT, Errno::ESRCH => e
-          context.logger.error("Error occurred while removing monitor process: #{e.message}")
-          File.delete(context.config.value('monitor_pid_file'))
-        end
-      end
-
-      def remove_collector_process # rubocop:disable Metrics/AbcSize
-        return unless File.exist? context.config.value('collector_pid_file')
-
-        process_id = File.read(context.config.value('collector_pid_file'))
-        return if process_id.empty?
-
-        begin
-          Process.kill('TERM', process_id.to_i)
-        rescue Errno::ENOENT, Errno::ESRCH => e
-          context.logger.error("Error occurred while removing collector process from manager: #{e.message}")
-        ensure
-          File.delete(context.config.value('collector_pid_file'))
-        end
-      end
-
-      def remove_data_file
-        return unless File.exist? context.config.value('monitor_state_file')
-
-        File.delete(context.config.value('monitor_state_file'))
-      end
-
-      # Remove both the monitor and collector processes that we have spawned.
-      def remove_processes
-        remove_monitor_process
-        remove_collector_process
-        remove_data_file
-      end
-    end
-  end
-end
data/lib/scout_apm/logging/state.rb
DELETED
@@ -1,69 +0,0 @@
-# frozen_string_literal: true
-
-module ScoutApm
-  module Logging
-    class Config
-      # Responsible for ensuring safe interprocess persistence around configuration state.
-      class State
-        attr_reader :context
-
-        def initialize(context)
-          @context = context
-        end
-
-        def load_state_from_file
-          return unless File.exist?(context.config.value('monitor_state_file'))
-
-          file_contents = File.read(context.config.value('monitor_state_file'))
-          JSON.parse(file_contents)
-        end
-
-        def flush_to_file!(updated_log_locations = []) # rubocop:disable Metrics/AbcSize
-          Utils.ensure_directory_exists(context.config.value('monitor_state_file'))
-
-          File.open(context.config.value('monitor_state_file'), (File::RDWR | File::CREAT), 0o644) do |file|
-            file.flock(File::LOCK_EX)
-
-            data = Config::ConfigState.get_values_to_set.each_with_object({}) do |key, memo|
-              memo[key] = context.config.value(key)
-            end
-
-            contents = file.read
-            old_log_state_files = if contents.empty?
-                                    []
-                                  else
-                                    current_data = JSON.parse(contents)
-                                    current_data['logs_monitored']
-                                  end
-
-            data['logs_monitored'] =
-              merge_and_dedup_log_locations(updated_log_locations, old_log_state_files, data['logs_monitored'])
-
-            file.rewind # Move cursor to beginning of the file
-            file.truncate(0) # Truncate existing content
-            file.write(JSON.pretty_generate(data))
-          rescue StandardError => e
-            context.logger.error("Error occurred while flushing state to file: #{e.message}. Unlocking.")
-          ensure
-            file.flock(File::LOCK_UN)
-          end
-        end
-
-        private
-
-        # Should we add better detection for similar basenames but different paths?
-        # May be a bit tricky with tools like capistrano and releases paths differentiated by time.
-        def merge_and_dedup_log_locations(*log_locations)
-          # Take the new logs if duplication (those first passed in the args) as we could be in a newer release.
-          logs = log_locations.reduce([], :concat)
-          merged = logs.each_with_object({}) do |log_path, hash|
-            base_name = File.basename(log_path)
-            hash[base_name] ||= log_path
-          end
-
-          merged.values
-        end
-      end
-    end
-  end
-end
data/spec/data/empty_logs_config.yml
File without changes
data/spec/data/logs_config.yml
DELETED
data/spec/data/state_file.json
DELETED
data/spec/integration/loggers/capture_spec.rb
DELETED
@@ -1,68 +0,0 @@
-require 'logger'
-
-require 'spec_helper'
-
-require_relative '../../../lib/scout_apm/logging/loggers/capture'
-
-describe ScoutApm::Logging::Loggers::Capture do
-  it 'should find the logger, capture the log destination, and rotate collector configs' do
-    ENV['SCOUT_MONITOR_INTERVAL'] = '10'
-    ENV['SCOUT_MONITOR_INTERVAL_DELAY'] = '10'
-    ENV['SCOUT_LOGS_MONITOR'] = 'true'
-
-    context = ScoutApm::Logging::MonitorManager.instance.context
-
-    state_file_location = context.config.value('monitor_state_file')
-    collector_pid_location = context.config.value('collector_pid_file')
-    ScoutApm::Logging::Utils.ensure_directory_exists(state_file_location)
-
-    first_logger = ScoutTestLogger.new('/tmp/first_file.log')
-    first_logger_basename = File.basename(first_logger.instance_variable_get(:@logdev).filename.to_s)
-    first_logger_updated_path = File.join(context.config.value('logs_proxy_log_dir'), first_logger_basename)
-    TestLoggerWrapper.logger = first_logger
-
-    similuate_railtie
-
-    # Give the process time to initialize, download the collector, and start it
-    wait_for_process_with_timeout!('otelcol-contrib', 20)
-
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).not_to be_empty
-    collector_pid = File.read(collector_pid_location)
-
-    content = File.read(state_file_location)
-    data = JSON.parse(content)
-    expect(data['logs_monitored']).to eq([first_logger_updated_path])
-
-    second_logger = ScoutTestLogger.new('/tmp/second_file.log')
-    second_logger_basename = File.basename(second_logger.instance_variable_get(:@logdev).filename.to_s)
-    second_logger_updated_path = File.join(context.config.value('logs_proxy_log_dir'), second_logger_basename)
-    TestLoggerWrapper.logger = second_logger
-
-    similuate_railtie
-
-    content = File.read(state_file_location)
-    data = JSON.parse(content)
-
-    expect(data['logs_monitored'].sort).to eq([first_logger_updated_path, second_logger_updated_path])
-
-    # Need to wait for the delay first health check, next monitor interval to restart the collector, and then for
-    # the collector to restart
-    sleep 25
-    wait_for_process_with_timeout!('otelcol-contrib', 20)
-
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).not_to be_empty
-    new_collector_pid = File.read(collector_pid_location)
-
-    # Should have restarted the collector based on the change
-    expect(new_collector_pid).not_to eq(collector_pid)
-  end
-
-  private
-
-  def similuate_railtie
-    context = ScoutApm::Logging::MonitorManager.instance.context
-
-    ScoutApm::Logging::Loggers::Capture.new(context).setup!
-    ScoutApm::Logging::MonitorManager.new.setup!
-  end
-end
data/spec/integration/monitor/collector/downloader/will_verify_checksum.rb
DELETED
@@ -1,49 +0,0 @@
-require 'spec_helper'
-
-require_relative '../../../../../lib/scout_apm/logging/monitor/collector/downloader'
-
-describe ScoutApm::Logging::Collector::Downloader do
-  it 'should validate checksum, and correct download if necessary' do
-    ENV['SCOUT_LOGS_MONITOR'] = 'true'
-    ENV['SCOUT_LOGS_MONITORED'] = '["/tmp/test.log"]'
-
-    otelcol_contrib_path = '/tmp/scout_apm/otelcol-contrib'
-    ScoutApm::Logging::Utils.ensure_directory_exists(otelcol_contrib_path)
-
-    File.write(otelcol_contrib_path, 'fake content')
-
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time start: #{Time.now}"
-    ScoutApm::Logging::MonitorManager.instance.setup!
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time after setup: #{Time.now}"
-
-    # Give the process time to initialize, download the collector, and start it
-    wait_for_process_with_timeout!('otelcol-contrib', 20)
-
-    download_time = File.mtime(otelcol_contrib_path)
-
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).not_to be_empty
-
-    ENV['SCOUT_LOGS_MONITOR'] = 'false'
-
-    ScoutApm::Logging::MonitorManager.new.setup!
-
-    sleep 5 # Give the process time to exit
-
-    expect(File.exist?(ScoutApm::Logging::MonitorManager.instance.context.config.value('monitor_pid_file'))).to be_falsey
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).to be_empty
-    expect(`pgrep scout_apm_log_monitor --runstates D,R,S`).to be_empty
-
-    ENV['SCOUT_LOGS_MONITOR'] = 'true'
-
-    ScoutApm::Logging::MonitorManager.new.setup!
-
-    # Give the process time to exit, and for the healthcheck to restart it
-    wait_for_process_with_timeout!('otelcol-contrib', 30)
-
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).not_to be_empty
-
-    recheck_time = File.mtime(otelcol_contrib_path)
-
-    expect(download_time).to eq(recheck_time)
-  end
-end
data/spec/integration/monitor/collector_healthcheck_spec.rb
DELETED
@@ -1,29 +0,0 @@
-require 'spec_helper'
-
-require_relative '../../../lib/scout_apm/logging/monitor/monitor'
-
-describe ScoutApm::Logging::Monitor do
-  it 'should recreate collector process on healthcheck if it has exited' do
-    ENV['SCOUT_MONITOR_INTERVAL'] = '10'
-    ENV['SCOUT_MONITOR_INTERVAL_DELAY'] = '10'
-    ENV['SCOUT_LOGS_MONITOR'] = 'true'
-    ENV['SCOUT_LOGS_MONITORED'] = '["/tmp/test.log"]'
-
-    ScoutApm::Logging::Utils.ensure_directory_exists('/tmp/scout_apm/scout_apm_log_monitor.pid')
-
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time start: #{Time.now}"
-    ScoutApm::Logging::MonitorManager.instance.setup!
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time after setup: #{Time.now}"
-
-    # Give the process time to initialize, download the collector, and start it
-    wait_for_process_with_timeout!('otelcol-contrib', 20)
-
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).not_to be_empty
-
-    # Bypass graceful shutdown
-    `pkill -9 otelcol-contrib`
-
-    # Give the process time to exit, and for the healthcheck to restart it
-    wait_for_process_with_timeout!('otelcol-contrib', 30)
-  end
-end
data/spec/integration/monitor/continuous_state_collector_spec.rb
DELETED
@@ -1,31 +0,0 @@
-require 'spec_helper'
-
-require_relative '../../../lib/scout_apm/logging/monitor/monitor'
-
-describe ScoutApm::Logging::Monitor do
-  it "Should not restart the collector if the state hasn't changed" do
-    ENV['SCOUT_MONITOR_INTERVAL'] = '10'
-    ENV['SCOUT_MONITOR_INTERVAL_DELAY'] = '10'
-    ENV['SCOUT_LOGS_MONITOR'] = 'true'
-    ENV['SCOUT_LOGS_MONITORED'] = '["/tmp/test.log"]'
-
-    context = ScoutApm::Logging::MonitorManager.instance.context
-    collector_pid_location = context.config.value('collector_pid_file')
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time start: #{Time.now}"
-    ScoutApm::Logging::MonitorManager.instance.setup!
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time after setup: #{Time.now}"
-    # Give the process time to initialize, download the collector, and start it
-    wait_for_process_with_timeout!('otelcol-contrib', 20)
-
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).not_to be_empty
-    collector_pid = File.read(collector_pid_location)
-
-    # Give time for the monitor interval to run.
-    sleep 30
-
-    expect(`pgrep otelcol-contrib --runstates D,R,S`).not_to be_empty
-    second_read_pid = File.read(collector_pid_location)
-
-    expect(second_read_pid).to eq(collector_pid)
-  end
-end
data/spec/integration/monitor/previous_collector_setup_spec.rb
DELETED
@@ -1,45 +0,0 @@
-require 'spec_helper'
-
-require_relative '../../../lib/scout_apm/logging/monitor/monitor'
-
-describe ScoutApm::Logging::Monitor do
-  it 'should use previous collector setup if monitor daemon exits' do
-    ENV['SCOUT_LOGS_MONITOR'] = 'true'
-    ENV['SCOUT_LOGS_MONITORED'] = '["/tmp/test.log"]'
-
-    monitor_pid_location = ScoutApm::Logging::MonitorManager.instance.context.config.value('monitor_pid_file')
-    collector_pid_location = ScoutApm::Logging::MonitorManager.instance.context.config.value('collector_pid_file')
-    ScoutApm::Logging::Utils.ensure_directory_exists(monitor_pid_location)
-
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time start: #{Time.now}"
-    ScoutApm::Logging::MonitorManager.instance.setup!
-    ScoutApm::Logging::MonitorManager.instance.context.logger.info "Time after setup: #{Time.now}"
-
-    # Give the process time to initialize, download the collector, and start it
-    wait_for_process_with_timeout!('otelcol-contrib', 20)
-
-    monitor_pid = File.read(monitor_pid_location)
-
-    otelcol_pid = `pgrep otelcol-contrib --runstates D,R,S`.strip!
-    stored_otelcol_pid = File.read(collector_pid_location)
-    expect(otelcol_pid).to eq(stored_otelcol_pid)
-
-    `kill -9 #{monitor_pid}`
-
-    # Create a separate monitor manager instance, or else we won't reload
-    # the configuration state.
-    ScoutApm::Logging::MonitorManager.new.setup!
-
-    sleep 5
-
-    expect(`pgrep -f /app/bin/scout_apm_logging_monitor --runstates D,R,S`).not_to be_empty
-
-    new_monitor_pid = File.read(monitor_pid_location)
-    expect(new_monitor_pid).not_to eq(monitor_pid)
-
-    should_be_same_otelcol_pid = `pgrep otelcol-contrib --runstates D,R,S`.strip!
-    should_be_same_stored_otelcol_pid = File.read(collector_pid_location)
-    expect(should_be_same_otelcol_pid).to eq(otelcol_pid)
-    expect(should_be_same_stored_otelcol_pid).to eq(stored_otelcol_pid)
-  end
-end