smart_proxy_reports 0.0.5
- checksums.yaml +7 -0
- data/LICENSE +675 -0
- data/README.md +183 -0
- data/bundler.d/reports.rb +1 -0
- data/lib/smart_proxy_reports/ansible_processor.rb +149 -0
- data/lib/smart_proxy_reports/processor.rb +103 -0
- data/lib/smart_proxy_reports/puppet_processor.rb +142 -0
- data/lib/smart_proxy_reports/reports.rb +30 -0
- data/lib/smart_proxy_reports/reports_api.rb +55 -0
- data/lib/smart_proxy_reports/reports_http_config.ru +10 -0
- data/lib/smart_proxy_reports/spooled_http_client.rb +102 -0
- data/lib/smart_proxy_reports/version.rb +5 -0
- data/lib/smart_proxy_reports.rb +2 -0
- data/settings.d/reports.yml.example +22 -0
- metadata +85 -0
data/README.md
ADDED
@@ -0,0 +1,183 @@
Smart Proxy Reports
===================

Transforms configuration and security management reports into Foreman-friendly
JSON and sends them to a Foreman instance. For more information about Foreman
JSON report format, visit
[foreman_host_reports](https://github.com/theforeman/foreman_host_reports).

## Usage

Send a POST HTTP call to `/reports/FORMAT` where FORMAT is one of the following formats.

### Puppet

Accepts Puppet Server YAML format:

* [Example input](test/fixtures/puppet6-foreman-web.yaml)
* [Example output](test/snapshots/foreman-web.json)
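
For example, a report can be submitted with plain `curl`. A minimal sketch,
assuming the proxy listens on `http://localhost:8000` as in the development
setup below; the Puppet endpoint expects the `application/x-yaml` content type:

```bash
# Post the bundled example Puppet report to a locally running proxy
curl -X POST \
  -H "Content-Type: application/x-yaml" \
  --data-binary @test/fixtures/puppet6-foreman-web.yaml \
  http://localhost:8000/reports/puppet
```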

## Development setup

A few words about setting up a development environment.
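
Whatever the report source, the module has to be enabled on the Smart Proxy
first. A minimal sketch, assuming the standard Smart Proxy settings layout; the
shipped `settings.d/reports.yml.example` is the authoritative template:

```bash
# Enable the reports module and restart the proxy
cat > /etc/foreman-proxy/settings.d/reports.yml <<'EOF'
:enabled: true
EOF
systemctl restart foreman-proxy
```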

### Ansible

Check out foreman-ansible-modules and build the collection via the `make`
command. Point the Ansible collection path at the build directory and configure
the Foreman callback plugin to send reports to the proxy:

    [defaults]
    collection_path = /home/lzap/work/foreman-ansible-modules/build
    callback_whitelist = foreman
    [callback_foreman]
    report_type = proxy
    proxy_url = http://localhost:8000/reports
    verify_certs = 0
    client_cert = /home/lzap/DummyX509/client-one.crt
    client_key = /home/lzap/DummyX509/client-one.key

Then call Ansible:

    ANSIBLE_LOAD_CALLBACK_PLUGINS=1 ansible localhost -m ping -vvv
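
Accepted reports are spooled on the proxy before being forwarded to Foreman
(see the `spool_dir` and `keep_reports` settings in `reports.rb` further down
in this diff). To peek at the spool, assuming the default directory; delivered
reports may already have been cleaned up unless `keep_reports` is enabled:

```bash
ls -l /var/lib/foreman-proxy/reports
```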

## Example data

Several example data files are provided for testing. Before importing them into
Foreman, make sure a `localhost` smart proxy exists as well as a host named
`report.example.com`. Example data can be captured via the `incoming_save_dir`
setting; name the generated files accordingly and put them into the
`contrib/fixtures` directory. There is a utility for uploading fixtures during
development and testing:

```
$ contrib/upload-fixture
Usage:
  contrib/upload-fixture -h        Display this help message
  contrib/upload-fixture -u URL    Proxy URL (http://localhost:8000)
  contrib/upload-fixture -f FILE   Fixture to upload
  contrib/upload-fixture -a        Upload all fixtures

$ contrib/upload-fixture -a
contrib/fixtures/ansible-copy-nochange.json: 200
contrib/fixtures/ansible-copy-success.json: 200
contrib/fixtures/ansible-package-install-failure.json: 200
contrib/fixtures/ansible-package-install-nochange.json: 200
contrib/fixtures/ansible-package-install-success.json: 200
contrib/fixtures/ansible-package-remove-failure.json: 200
contrib/fixtures/ansible-package-remove-success.json: 200
```
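
To capture new example data, enable the `incoming_save_dir` setting mentioned
above. A minimal sketch, assuming the standard settings file location; the
directory is illustrative and must be writable by the proxy:

```bash
cat >> /etc/foreman-proxy/settings.d/reports.yml <<'EOF'
:incoming_save_dir: /var/lib/foreman-proxy/reports-incoming
EOF
mkdir -p /var/lib/foreman-proxy/reports-incoming
chown foreman-proxy: /var/lib/foreman-proxy/reports-incoming
systemctl restart foreman-proxy
```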

### Importing into Foreman directly

To import a report directly into Foreman:

```
curl -H "Accept:application/json,version=2" -H "Content-Type:application/json" -X POST -d @test/snapshots/foreman-web.json http://localhost:5000/api/v2/host_reports
```

### Puppet

To install and configure a Puppetserver on EL7, run the following:

```bash
# Install the server - modify as needed for your platform
yum -y install https://yum.puppet.com/puppet7-release-el-7.noarch.rpm
yum -y install puppetserver
# Correct $PATH in the current shell - happens on start of fresh shells automatically
source /etc/profile.d/puppet-agent.sh
# Configure the HTTP report processor
puppet config set reports store,http
puppet config set reporturl http://$HOSTNAME:8000/reports/puppet
# Enable & start the service
systemctl enable --now puppetserver
```

If you prefer HTTPS, set a different reporturl and configure the CA certificates
according to the example below:

```
# use HTTPS, without Katello the port is 8443, with Katello it's 9090
puppet config set reporturl https://$HOSTNAME:8443/reports/puppet
# install the Smart Proxy CA certificate into Puppet's localcacert store
## first find the correct pem file
grep :ssl_ca_file /etc/foreman-proxy/settings.yml
## find the localcacert puppet storage
puppet config print --section server localcacert
## then append the content of each pem file to the other
cp /etc/foreman-proxy/ssl_ca.pem /tmp/smart-proxy.pem
cp /etc/puppetlabs/puppet/ssl/certs/ca.pem /tmp/puppet-ca.pem
cat /tmp/smart-proxy.pem >> /etc/puppetlabs/puppet/ssl/certs/ca.pem
cat /tmp/puppet-ca.pem >> /etc/foreman-proxy/ssl_ca.pem
# restart the services
systemctl restart puppetserver
systemctl restart foreman-proxy
```

Note that this means the Puppetserver API will trust client certificates signed
by the Smart Proxy CA, and such clients are subject to the authorization defined
in Puppet's auth.conf; e.g. a client with a certificate of the same cname can
request a catalog for that node. That is typically not a problem, but consider
the implications for your SSL certificate layout. Similarly, the proxy will now
trust certificates signed by the Puppet CA, although those are still subject to
the smart proxy trusted hosts authorization.
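
To confirm the cross-trust works as intended, the merged CA bundles can be
checked with `openssl`. This is only a sketch; the certificate paths are
illustrative, take the real ones from `/etc/foreman-proxy/settings.yml`
(`:ssl_certificate`) and from `puppet config print hostcert`:

```bash
# each CA bundle should now validate the other side's certificate
openssl verify -CAfile /etc/puppetlabs/puppet/ssl/certs/ca.pem /etc/foreman-proxy/ssl_cert.pem
openssl verify -CAfile /etc/foreman-proxy/ssl_ca.pem "$(puppet config print --section server hostcert)"
```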

By default the agent connects to a host named `puppet`, which may not resolve.
Set it to your hostname:

```bash
puppet config set server $HOSTNAME
```

You can manually trigger a Puppet run with `puppet agent -t`. You may need to
look at `/var/log/puppetlabs/puppetserver/puppetserver.log` to see errors.
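
On the proxy side, errors (and, with debug logging enabled, the spooled report
IDs) end up in the Smart Proxy log. A sketch, assuming the usual packaged log
location:

```bash
puppet agent -t
tail -f /var/log/foreman-proxy/proxy.log
```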

## Status mapping

### Puppet

* changed -> applied
* corrective_change -> applied
* failed -> failed
* failed_to_restart -> failed
* scheduled -> pending
* restarted -> other
* skipped -> other
* out_of_sync -> other
* total -> not mapped

### Ansible

* applied -> applied
* failed -> failed
* skipped -> other
* pending -> pending

## Contributing

Fork and send a Pull Request. Thanks!

### Unit tests

To run unit tests:

    bundle exec rake test

There are a few snapshot tests which compare input JSON/YAML with snapshot
fixtures. When they fail, you are asked to delete those fixtures, re-run the
tests, review the diff and push the changes into git:

```
rm test/snapshots/*
bundle exec rake test
git diff
git commit
```

## License

GNU GPLv3, see the LICENSE file for more information.

## Copyright

Copyright (c) 2021 Red Hat, Inc.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

data/bundler.d/reports.rb
ADDED
@@ -0,0 +1 @@
gem "smart_proxy_reports"

data/lib/smart_proxy_reports/ansible_processor.rb
ADDED
@@ -0,0 +1,149 @@
# frozen_string_literal: true

module Proxy::Reports
  class AnsibleProcessor < Processor
    KEYS_TO_COPY = %w[status check_mode].freeze

    def initialize(data, json_body: true)
      super(data, json_body: json_body)
      measure :parse do
        @data = JSON.parse(data)
      end
      @body = {}
      logger.debug "Processing report #{report_id}"
      debug_payload("Input", @data)
    end

    def report_id
      @data["uuid"] || generated_report_id
    end

    def process_results
      @data["results"]&.each do |result|
        process_facts(result)
        process_level(result)
        process_message(result)
        process_keywords(result)
      end
      @data["results"]
    rescue StandardError => e
      logger.error "Unable to parse results", e
      @data["results"]
    end

    def process
      measure :process do
        @body["format"] = "ansible"
        @body["id"] = report_id
        @body["host"] = hostname_from_config || @data["host"]
        @body["proxy"] = Proxy::Reports::Plugin.settings.reported_proxy_hostname
        @body["reported_at"] = @data["reported_at"]
        @body["results"] = process_results
        @body["keywords"] = keywords
        @body["telemetry"] = telemetry
        @body["errors"] = errors if errors?
        KEYS_TO_COPY.each do |key|
          @body[key] = @data[key]
        end
      end
    end

    def build_report
      process
      if debug_payload?
        logger.debug { JSON.pretty_generate(@body) }
      end
      build_report_root(
        format: "ansible",
        version: 1,
        host: @body["host"],
        reported_at: @body["reported_at"],
        statuses: process_statuses,
        proxy: @body["proxy"],
        body: @body,
        keywords: @body["keywords"],
      )
    end

    def spool_report
      report_hash = build_report
      debug_payload("Output", report_hash)
      payload = measure :format do
        report_hash.to_json
      end
      SpooledHttpClient.instance.spool(report_id, payload)
    end

    private

    def process_facts(result)
      # TODO: add fact processing and sending to the fact endpoint
      result["result"]["ansible_facts"] = {}
    end

    def process_keywords(result)
      if result["failed"]
        add_keywords("HasFailure", "AnsibleTaskFailed:#{result["task"]["action"]}")
      elsif result["result"]["changed"]
        add_keywords("HasChange")
      end
    end

    def process_level(result)
      if result["failed"]
        result["level"] = "err"
      elsif result["result"]["changed"]
        result["level"] = "notice"
      else
        result["level"] = "info"
      end
    end

    def process_message(result)
      msg = "N/A"
      return result["friendly_message"] = msg if result["task"].nil? || result["task"]["action"].nil?
      return result["friendly_message"] = result["result"]["msg"] if result["failed"]
      result_tree = result["result"]
      task_tree = result["task"]
      raise("Report does not contain the required 'results/result' element") unless result_tree
      raise("Report does not contain the required 'results/task' element") unless task_tree
      module_args_tree = result_tree.dig("invocation", "module_args")

      # Build a human-friendly message for the most common Ansible modules
      case task_tree["action"]
      when "ansible.builtin.package", "package"
        detail = result_tree["results"] || result_tree["msg"] || "No details"
        msg = "Package(s) #{module_args_tree["name"].join(",")} are #{module_args_tree["state"]}: #{detail}"
      when "ansible.builtin.template", "template"
        msg = "Render template #{module_args_tree["_original_basename"]} to #{result_tree["dest"]}"
      when "ansible.builtin.service", "service"
        msg = "Service #{result_tree["name"]} is #{result_tree["state"]} and enabled: #{result_tree["enabled"]}"
      when "ansible.builtin.group", "group"
        msg = "User group #{result_tree["name"]} is #{result_tree["state"]} with gid: #{result_tree["gid"]}"
      when "ansible.builtin.user", "user"
        msg = "User #{result_tree["name"]} is #{result_tree["state"]} with uid: #{result_tree["uid"]}"
      when "ansible.builtin.cron", "cron"
        msg = "Cron job: #{module_args_tree["minute"]} #{module_args_tree["hour"]} #{module_args_tree["day"]} #{module_args_tree["month"]} #{module_args_tree["weekday"]} #{module_args_tree["job"]} and disabled: #{module_args_tree["disabled"]}"
      when "ansible.builtin.copy", "copy"
        msg = "Copy #{module_args_tree["_original_basename"]} to #{result_tree["dest"]}"
      when "ansible.builtin.command", "ansible.builtin.shell", "command", "shell"
        msg = result_tree["stdout_lines"]
      end
    rescue StandardError => e
      logger.debug "Unable to parse result (#{e.message}): #{result.inspect}"
    ensure
      result["friendly_message"] = msg
    end

    def process_statuses
      {
        "applied" => @body["status"]["applied"],
        "failed" => @body["status"]["failed"],
        "pending" => @body["status"]["pending"] || 0, # It's only present in check mode
        "other" => @body["status"]["skipped"],
      }
    rescue StandardError => e
      logger.error "Unable to process statuses", e
      { "applied" => 0, "failed" => 0, "pending" => 0, "other" => 0 }
    end
  end
end

data/lib/smart_proxy_reports/processor.rb
ADDED
@@ -0,0 +1,103 @@
# frozen_string_literal: true
require "pp"
require "proxy/log"

module Proxy::Reports
  class Processor
    include ::Proxy::Log

    def self.new_processor(format, data, json_body: true)
      case format
      when "puppet"
        PuppetProcessor.new(data, json_body: json_body)
      when "ansible"
        AnsibleProcessor.new(data, json_body: json_body)
      else
        raise NotImplementedError
      end
    end

    def initialize(*, json_body: true)
      @keywords_set = {}
      @errors = []
      @json_body = json_body
    end

    def generated_report_id
      @generated_report_id ||= SecureRandom.uuid
    end

    def hostname_from_config
      @hostname_from_config ||= Proxy::Reports::Plugin.settings.override_hostname
    end

    def build_report_root(format:, version:, host:, reported_at:, statuses:, proxy:, body:, keywords:)
      {
        "host_report" => {
          "format" => format,
          "version" => version,
          "host" => host,
          "reported_at" => reported_at,
          "proxy" => proxy,
          "body" => @json_body ? body.to_json : body,
          "keywords" => keywords,
        }.merge(statuses),
      }
      # TODO add metric with total time
    end

    def debug_payload?
      Proxy::Reports::Plugin.settings.debug_payload
    end

    def debug_payload(prefix, data)
      return unless debug_payload?
      logger.debug { "#{prefix}: #{data.pretty_inspect}" }
    end

    def add_keywords(*keywords)
      keywords.each do |keyword|
        @keywords_set[keyword] = true
      end
    end

    def keywords
      @keywords_set.keys.to_a rescue []
    end

    attr_reader :errors

    def log_error(message)
      @errors << message.to_s
    end

    def errors?
      @errors&.any?
    end

    # TODO support multiple metrics and adding total time
    attr_reader :telemetry

    def measure(metric)
      t1 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      yield
    ensure
      t2 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      @telemetry ||= {}
      @telemetry[metric.to_s] = (t2 - t1) * 1000
    end

    def telemetry_as_string
      result = []
      telemetry.each do |key, value|
        result << "#{key}=#{value.round(1)}ms"
      end
      result.join(", ")
    end

    def spool_report
      super
      logger.debug "Spooled #{report_id}: #{telemetry_as_string}"
    end
  end
end

data/lib/smart_proxy_reports/puppet_processor.rb
ADDED
@@ -0,0 +1,142 @@
# frozen_string_literal: true

module Proxy::Reports
  class PuppetProcessor < Processor
    YAML_CLEAN = /!ruby\/object.*$/.freeze
    KEYS_TO_COPY = %w[report_format puppet_version environment metrics].freeze
    MAX_EVAL_TIMES = 29

    def initialize(data, json_body: true)
      super(data, json_body: json_body)
      measure :parse do
        if Gem::Version.new(RUBY_VERSION) < Gem::Version.new("2.6.0")
          # Ruby 2.5 or older does not have permitted_classes argument available
          @data = YAML.load(data.gsub(YAML_CLEAN, ""))
        else
          @data = YAML.safe_load(data.gsub(YAML_CLEAN, ""), permitted_classes: [Symbol, Time, Date])
        end
      end
      raise("No content") unless @data
      @body = {}
      @evaluation_times = []
      logger.debug "Processing report #{report_id}"
      debug_payload("Input", @data)
    end

    def report_id
      @data["transaction_uuid"] || generated_report_id
    end

    def process_logs
      logs = []
      @data["logs"]&.each do |log|
        logs << [log["level"]&.to_s, log["source"], log["message"]]
      end
      logs
    rescue StandardError => e
      logger.error "Unable to parse logs", e
      logs
    end

    def process_resource_statuses
      statuses = []
      @data["resource_statuses"]&.each_pair do |key, value|
        statuses << key
        @evaluation_times << [key, value["evaluation_time"]]
        # failures
        add_keywords("PuppetResourceFailed:#{key}", "PuppetHasFailure") if value["failed"] || value["failed_to_restart"]
        value["events"]&.each do |event|
          add_keywords("PuppetResourceFailed:#{key}", "PuppetHasFailure") if event["status"] == "failed"
          add_keywords("PuppetHasCorrectiveChange") if event["corrective_change"]
        end
        # changes
        add_keywords("PuppetHasChange") if value["changed"]
        add_keywords("PuppetHasChange") if value["change_count"] && value["change_count"] > 0
        # out of sync
        add_keywords("PuppetIsOutOfSync") if value["out_of_sync"]
        add_keywords("PuppetIsOutOfSync") if value["out_of_sync_count"] && value["out_of_sync_count"] > 0
        # skips
        add_keywords("PuppetHasSkips") if value["skipped"]
        # corrective changes
        add_keywords("PuppetHasCorrectiveChange") if value["corrective_change"]
      end
      statuses
    rescue StandardError => e
      logger.error "Unable to parse resource_statuses", e
      statuses
    end

    def process_evaluation_times
      @evaluation_times.sort! { |a, b| b[1] <=> a[1] }
      if @evaluation_times.count > MAX_EVAL_TIMES
        others = @evaluation_times[MAX_EVAL_TIMES..@evaluation_times.count - 1].sum { |x| x[1] }
        @evaluation_times = @evaluation_times[0..MAX_EVAL_TIMES - 1]
        @evaluation_times << ["Others", others] if others > 0.0001
      end
      @evaluation_times
    rescue StandardError => e
      logger.error "Unable to process evaluation_times", e
      []
    end

    def process
      measure :process do
        @body["format"] = "puppet"
        @body["id"] = report_id
        @body["host"] = hostname_from_config || @data["host"]
        @body["proxy"] = Proxy::Reports::Plugin.settings.reported_proxy_hostname
        @body["reported_at"] = @data["time"]
        KEYS_TO_COPY.each do |key|
          @body[key] = @data[key]
        end
        @body["logs"] = process_logs
        @body["resource_statuses"] = process_resource_statuses
        @body["keywords"] = keywords
        @body["evaluation_times"] = process_evaluation_times
        @body["telemetry"] = telemetry
        @body["errors"] = errors if errors?
      end
    end

    def build_report
      process
      if debug_payload?
        logger.debug { JSON.pretty_generate(@body) }
      end
      build_report_root(
        format: "puppet",
        version: 1,
        host: @body["host"],
        reported_at: @body["reported_at"],
        statuses: process_statuses,
        proxy: @body["proxy"],
        body: @body,
        keywords: @body["keywords"],
      )
    end

    def spool_report
      report_hash = build_report
      debug_payload("Output", report_hash)
      payload = measure :format do
        report_hash.to_json
      end
      SpooledHttpClient.instance.spool(report_id, payload)
    end

    private

    def process_statuses
      stats = @body["metrics"]["resources"]["values"].collect { |s| [s[0], s[2]] }.to_h
      {
        "applied" => stats["changed"] + stats["corrective_change"],
        "failed" => stats["failed"] + stats["failed_to_restart"],
        "pending" => stats["scheduled"],
        "other" => stats["restarted"] + stats["skipped"] + stats["out_of_sync"],
      }
    rescue StandardError => e
      logger.error "Unable to process statuses", e
      { "applied" => 0, "failed" => 0, "pending" => 0, "other" => 0 }
    end
  end
end

data/lib/smart_proxy_reports/reports.rb
ADDED
@@ -0,0 +1,30 @@
require "socket"

module Proxy::Reports
  class PluginConfiguration
    def load_classes
      require "smart_proxy_reports/spooled_http_client"
    end

    def load_dependency_injection_wirings(container, _settings)
      container.singleton_dependency :reports_spool, -> {
        SpooledHttpClient.instance.initialize_directory
      }
    end
  end

  class Plugin < ::Proxy::Plugin
    plugin :reports, Proxy::Reports::VERSION

    default_settings reported_proxy_hostname: Socket.gethostname(),
                     debug_payload: false,
                     spool_dir: "/var/lib/foreman-proxy/reports",
                     keep_reports: false

    rackup_path File.expand_path("reports_http_config.ru", __dir__)

    load_classes PluginConfiguration
    load_dependency_injection_wirings PluginConfiguration
    start_services :reports_spool
  end
end

data/lib/smart_proxy_reports/reports_api.rb
ADDED
@@ -0,0 +1,55 @@
require "sinatra"
require "yaml"
require "smart_proxy_reports/reports"
require "smart_proxy_reports/processor"
require "smart_proxy_reports/puppet_processor"
require "smart_proxy_reports/ansible_processor"

module Proxy::Reports
  class Api < ::Sinatra::Base
    include ::Proxy::Log
    include ::Proxy::Util
    helpers ::Proxy::Helpers
    authorize_with_trusted_hosts
    authorize_with_ssl_client

    before do
      content_type "application/json"
    end

    def check_content_type(format)
      request_type = request.env["CONTENT_TYPE"]
      if format == "puppet"
        log_halt(415, "Content type must be application/x-yaml, was: #{request_type}") unless request_type.start_with?("application/x-yaml")
      elsif format == "ansible"
        log_halt(415, "Content type must be application/json, was: #{request_type}") unless request_type.start_with?("application/json")
      else
        log_halt(415, "Unknown format: #{format}")
      end
    end

    EXTS = {
      puppet: "yaml",
      ansible: "json",
    }.freeze

    def save_payload(input, format)
      filename = File.join(Proxy::Reports::Plugin.settings.incoming_save_dir, "#{format}-#{Time.now.to_f}.#{EXTS[format.to_sym]}")
      File.open(filename, "w") { |f| f.write(input) }
    end

    post "/:format" do
      format = params[:format]
      log_halt(404, "Format argument not specified") unless format
      check_content_type(format)
      input = request.body.read
      save_payload(input, format) if Proxy::Reports::Plugin.settings.incoming_save_dir
      log_halt(415, "Missing body") if input.empty?
      json_body = to_bool(params[:json_body], true)
      processor = Processor.new_processor(format, input, json_body: json_body)
      processor.spool_report
    rescue => e
      log_halt 415, e, "Error during report processing: #{e.message}"
    end
  end
end